add missing tatum

tangxifan 2020-01-03 22:42:17 -05:00
parent 7a96f866bb
commit 60cbcf9104
129 changed files with 16639 additions and 0 deletions

libs/EXTERNAL/libtatum/.gitignore

@@ -0,0 +1,49 @@
#
# Project Related Files
#
#Executables
#Generated files
build*/
core
massif.out*
*.log
#YCM
.ycm_extra_conf.py
#gprof
gmon.out
#valgrind
callgrind*
#ctags
./tags
#
# C++ related files
#
# Compiled Object files
*.slo
*.lo
*.o
# Compiled Dynamic libraries
*.so
*.dylib
# Compiled Static libraries
*.lai
*.la
*.a
# Dependency files
*.d
#
# Python temp files
#
*.pyc

@@ -0,0 +1,46 @@
language: cpp
dist: trusty #Ubuntu 14.04 by default
sudo: false #Use container based infrastructure
matrix:
include:
#Extensive testing for base compiler
- env: TESTS="basic mcnc20" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=gcc-5 CXX=g++-5"
addons: { apt: { packages: ["cmake", "g++-5", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } }
- env: TESTS="basic mcnc20" TATUM_EXECUTION_ENGINE=serial MATRIX_EVAL="CC=gcc-5 CXX=g++-5"
addons: { apt: { packages: ["cmake", "g++-5", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } }
- env: TESTS="basic mcnc20" TATUM_EXECUTION_ENGINE=tbb MATRIX_EVAL="CC=gcc-5 CXX=g++-5"
addons: { apt: { packages: ["cmake", "g++-5", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } }
#Simple testing for other compilers
- env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=gcc-8 CXX=g++-8"
addons: { apt: { packages: ["cmake", "g++-8", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } }
- env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=gcc-7 CXX=g++-7"
addons: { apt: { packages: ["cmake", "g++-7", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } }
- env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=gcc-6 CXX=g++-6"
addons: { apt: { packages: ["cmake", "g++-6", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } }
- env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=gcc-4.9 CXX=g++-4.9"
addons: { apt: { packages: ["cmake", "g++-4.9", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } }
- env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=clang-3.5 CXX=clang++-3.5"
addons: { apt: { packages: ["cmake", "clang-3.5", "g++-4.9", "libtbb-dev"], sources: ["llvm-toolchain-trusty-3.5", "ubuntu-toolchain-r-test"] } }
- env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=clang-5.0 CXX=clang++-5.0"
addons: { apt: { packages: ["cmake", "clang-5.0", "g++-4.9", "libtbb-dev"], sources: ["llvm-toolchain-trusty-5.0", "ubuntu-toolchain-r-test"] } }
before_install:
- eval "${MATRIX_EVAL}" #Set compiler versions
- echo $CC
- echo $CXX
script:
#Build
- mkdir -p build && pushd build && cmake .. -DTATUM_EXECUTION_ENGINE=$TATUM_EXECUTION_ENGINE && make -j2 && popd
#Test
- ./scripts/reg_test.py --tatum_test_exec build/tatum_test/tatum_test --tatum_nworkers 2 $TESTS

@@ -0,0 +1,37 @@
cmake_minimum_required(VERSION 3.9)
project("tatum")
set(TATUM_EXECUTION_ENGINE "auto" CACHE STRING "Specify the framework for (potential) parallel execution")
set_property(CACHE TATUM_EXECUTION_ENGINE PROPERTY STRINGS auto serial tbb)
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules")
if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
#Set the default build type if not specified
if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release CACHE STRING
"Choose the type of build: None, Debug, Release, RelWithDebInfo, MinSizeRel"
FORCE)
endif()
message(STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
#Only set compiler flags if not a sub-project
set(WARN_FLAGS -Wall -Wextra -Wpedantic -Wcast-qual -Wcast-align -Wshadow -Wformat=2 -Wlogical-op -Wmissing-declarations -Wmissing-include-dirs -Wredundant-decls -Wswitch-default -Wundef -Wunused-variable -Wdisabled-optimization -Wnoexcept -Woverloaded-virtual -Wctor-dtor-privacy -Wnon-virtual-dtor)
add_compile_options(${WARN_FLAGS})
add_compile_options(-std=c++14)
set(FLEX_BISON_WARN_SUPPRESS_FLAGS -Wno-switch-default -Wno-unused-parameter -Wno-sign-compare -Wno-missing-declarations)
endif()
add_subdirectory(libtatum)
if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR})
#Only build the parser, test executable and docs if not a sub-project
add_subdirectory(tatum_test)
add_subdirectory(libtatumparse)
add_subdirectory(tatumparse_test)
add_subdirectory(doc)
endif()

@@ -0,0 +1,21 @@
The MIT License (MIT)
Copyright (c) 2015 Kevin Murray
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -0,0 +1,68 @@
#This is a simple wrapper hiding cmake from non-expert end users.
#
# It supports the targets:
# 'make' - builds everything (all libraries/executables)
# 'make clean' - removes generated build objects/libraries/executables etc.
# 'make distclean' - will clean everything including the cmake generated build files
#
# All other targets (e.g. 'make tatum_test') are passed to the cmake generated makefile
# and processed according to the CMakeLists.txt.
#
# To perform a debug build use:
# 'make BUILD_TYPE=debug'
#Default build type
# Possible values:
# release
# debug
BUILD_TYPE = release
#Allows users to pass parameters to cmake
# e.g. make CMAKE_PARAMS="-DVTR_ENABLE_SANITIZE=true"
override CMAKE_PARAMS := -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -G 'Unix Makefiles' ${CMAKE_PARAMS}
# -s : Suppress makefile output (e.g. entering/leaving directories)
# --output-sync target : For parallel compilation ensure output for each target is synchronized (make version >= 4.0)
MAKEFLAGS := -s
BUILD_DIR=./build
GENERATED_MAKEFILE := $(BUILD_DIR)/Makefile
#Check for the cmake executable
CMAKE := $(shell command -v cmake 2> /dev/null)
#Show test log on failures with 'make test'
export CTEST_OUTPUT_ON_FAILURE=TRUE
#All targets in this make file are always out of date.
# This ensures that any make target requests are forwarded to
# the generated makefile
.PHONY: all distclean $(GENERATED_MAKEFILE) $(MAKECMDGOALS)
#Build everything
all: $(GENERATED_MAKEFILE)
@+$(MAKE) -C $(BUILD_DIR)
#Call the generated Makefile's clean, and then remove all cmake generated files
distclean: $(GENERATED_MAKEFILE)
@ echo "Cleaning build..."
@+$(MAKE) -C $(BUILD_DIR) clean
@ echo "Removing build system files.."
@ rm -rf $(BUILD_DIR)
@ rm -rf CMakeFiles CMakeCache.txt #In case 'cmake .' was run in the source directory
#Call cmake to generate the main Makefile
$(GENERATED_MAKEFILE):
ifeq ($(CMAKE),)
$(error Required 'cmake' executable not found. On debian/ubuntu try 'sudo apt-get install cmake' to install)
endif
@ mkdir -p $(BUILD_DIR)
echo "cd $(BUILD_DIR) && $(CMAKE) $(CMAKE_PARAMS) .. "
cd $(BUILD_DIR) && $(CMAKE) $(CMAKE_PARAMS) ..
#Forward any targets that are not named 'distclean' to the generated Makefile
ifneq ($(MAKECMDGOALS),distclean)
$(MAKECMDGOALS): $(GENERATED_MAKEFILE)
@+$(MAKE) -C $(BUILD_DIR) $(MAKECMDGOALS)
endif

@@ -0,0 +1,58 @@
# Tatum: A Fast, Flexible Static Timing Analysis (STA) Engine for Digital Circuits
[![Build Status](https://travis-ci.org/kmurray/tatum.svg?branch=master)](https://travis-ci.org/kmurray/tatum)
## Overview
Tatum is a block-based Static Timing Analysis (STA) engine suitable for integration with Computer-Aided Design (CAD) tools, which analyze, implement and optimize digital circuits.
Tatum supports both setup (max-delay) and hold (min-delay) analysis, clock skew, multiple clocks and a variety of timing exceptions.
Tatum is provided as a library (`libtatum`) which can be easily integrated into the host application.
Tatum operates on an abstract *timing graph* constructed by the host application, and can be configured to use an application defined delay calculator.
Tatum is optimized for high performance, as required by optimizing CAD tools.
In particular:
* Tatum performs only a *single* set of graph traversals to calculate timing information for all clocks and analyses (setup and hold).
* Tatum's data structures are cache optimized
* Tatum supports parallel analysis using multiple CPU cores
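As a rough illustration of the integration flow described above, the sketch below shows how a host application might drive the library. Only the class names visible in this commit's headers (`TimingGraph`, `TimingConstraints`, `DelayCalculator`, `SetupHoldAnalysis`, the analyzer factory) come from the source; the include paths, the factory signature and the `update_timing()` call are assumptions and may not match the actual API.
```
// Illustrative only -- factory/update signatures and include paths are assumptions.
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
#include "tatum/analyzer_factory.hpp"

void run_sta(const tatum::TimingGraph& timing_graph,           // built by the host application
             const tatum::TimingConstraints& constraints,      // clocks, setup/hold and I/O constraints
             const tatum::DelayCalculator& delay_calculator) { // application-defined delay model
    // One analyzer covers both setup and hold analysis for all clocks in a single set of traversals.
    auto analyzer = tatum::AnalyzerFactory<tatum::SetupHoldAnalysis>::make(timing_graph,
                                                                           constraints,
                                                                           delay_calculator);
    analyzer->update_timing(); // (re)run the graph traversals, possibly in parallel
}
```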
## How to Cite
If your work uses Tatum please cite the following as a general citation:
K. E. Murray and V. Betz, "Tatum: Parallel Timing Analysis for Faster Design Cycles and Improved Optimization", *IEEE International Conference on Field-Programmable Technology (FPT)*, 2018
**Bibtex:**
```
@inproceedings{c:tatum,
author = {Murray, Kevin E. and Betz, Vaughn},
title = {Tatum: Parallel Timing Analysis for Faster Design Cycles and Improved Optimization},
booktitle = {IEEE International Conference on Field-Programmable Technology (FPT)},
year = {2018}
}
```
## Documentation
Coming soon.
## Download
Coming soon.
## Projects using Tatum
Tatum is designed to be re-usable in a variety of applications.
Some of the known uses are:
* The [Verilog to Routing (VTR)](https://verilogtorouting.org) project for Field-Programmable Gate Array (FPGA) Architecture and CAD research. Tatum is used as the STA engine in the VPR placement and routing tool.
* The [CGRA-ME](http://cgra-me.ece.utoronto.ca/) framework for Coarse-Grained Reconfigurable Array (CGRA) Architecture research.
*If your project is using Tatum please let us know!*
## History
### Why was Tatum created?
I needed a high-performance, flexible STA engine for my research into FPGA architecture and CAD tools.
I could find no suitable open source STA engines, so I wrote my own.
### Name Origin
A *tatum* is a unit of time used in the computational analysis of music \[[1]\], named after Jazz pianist [Art Tatum](https://en.wikipedia.org/wiki/Art_Tatum).
[1]: http://web.media.mit.edu/~tristan/phd/dissertation/chapter3.html#x1-390003.4.3

@@ -0,0 +1 @@
theme: jekyll-theme-minimal

@@ -0,0 +1,303 @@
# The MIT License (MIT)
#
# Copyright (c) 2015 Justus Calvin
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# FindTBB
# -------
#
# Find TBB include directories and libraries.
#
# Usage:
#
# find_package(TBB [major[.minor]] [EXACT]
# [QUIET] [REQUIRED]
# [[COMPONENTS] [components...]]
# [OPTIONAL_COMPONENTS components...])
#
# where the allowed components are tbbmalloc and tbb_preview. Users may modify
# the behavior of this module with the following variables:
#
# * TBB_ROOT_DIR - The base directory of the TBB installation.
# * TBB_INCLUDE_DIR - The directory that contains the TBB headers files.
# * TBB_LIBRARY - The directory that contains the TBB library files.
# * TBB_<library>_LIBRARY - The path of the corresponding TBB library.
# These libraries, if specified, override the
# corresponding library search results, where <library>
# may be tbb, tbb_debug, tbbmalloc, tbbmalloc_debug,
# tbb_preview, or tbb_preview_debug.
# * TBB_USE_DEBUG_BUILD - The debug version of tbb libraries, if present, will
# be used instead of the release version.
#
# Users may modify the behavior of this module with the following environment
# variables:
#
# * TBB_INSTALL_DIR
# * TBBROOT
# * LIBRARY_PATH
#
# This module will set the following variables:
#
# * TBB_FOUND - Set to false, or undefined, if we haven't found, or
# don't want to use TBB.
# * TBB_<component>_FOUND - If False, optional <component> part of TBB system is
# not available.
# * TBB_VERSION - The full version string
# * TBB_VERSION_MAJOR - The major version
# * TBB_VERSION_MINOR - The minor version
# * TBB_INTERFACE_VERSION - The interface version number defined in
# tbb/tbb_stddef.h.
# * TBB_<library>_LIBRARY_RELEASE - The path of the TBB release version of
# <library>, where <library> may be tbb, tbb_debug,
# tbbmalloc, tbbmalloc_debug, tbb_preview, or
# tbb_preview_debug.
# * TBB_<library>_LIBRARY_DEBUG - The path of the TBB debug version of
# <library>, where <library> may be tbb, tbb_debug,
# tbbmalloc, tbbmalloc_debug, tbb_preview, or
# tbb_preview_debug.
#
# The following variables should be used to build and link with TBB:
#
# * TBB_INCLUDE_DIRS - The include directory for TBB.
# * TBB_LIBRARIES - The libraries to link against to use TBB.
# * TBB_LIBRARIES_RELEASE - The release libraries to link against to use TBB.
# * TBB_LIBRARIES_DEBUG - The debug libraries to link against to use TBB.
# * TBB_DEFINITIONS - Definitions to use when compiling code that uses
# TBB.
# * TBB_DEFINITIONS_RELEASE - Definitions to use when compiling release code that
# uses TBB.
# * TBB_DEFINITIONS_DEBUG - Definitions to use when compiling debug code that
# uses TBB.
#
# This module will also create the "tbb" target that may be used when building
# executables and libraries.
include(FindPackageHandleStandardArgs)
if(NOT TBB_FOUND)
##################################
# Check the build type
##################################
if(NOT DEFINED TBB_USE_DEBUG_BUILD)
if(CMAKE_BUILD_TYPE MATCHES "(Debug|DEBUG|debug|RelWithDebInfo|RELWITHDEBINFO|relwithdebinfo)")
set(TBB_BUILD_TYPE DEBUG)
else()
set(TBB_BUILD_TYPE RELEASE)
endif()
elseif(TBB_USE_DEBUG_BUILD)
set(TBB_BUILD_TYPE DEBUG)
else()
set(TBB_BUILD_TYPE RELEASE)
endif()
##################################
# Set the TBB search directories
##################################
# Define search paths based on user input and environment variables
set(TBB_SEARCH_DIR ${TBB_ROOT_DIR} $ENV{TBB_INSTALL_DIR} $ENV{TBBROOT})
# Define the search directories based on the current platform
if(CMAKE_SYSTEM_NAME STREQUAL "Windows")
set(TBB_DEFAULT_SEARCH_DIR "C:/Program Files/Intel/TBB"
"C:/Program Files (x86)/Intel/TBB")
# Set the target architecture
if(CMAKE_SIZEOF_VOID_P EQUAL 8)
set(TBB_ARCHITECTURE "intel64")
else()
set(TBB_ARCHITECTURE "ia32")
endif()
# Set the TBB search library path search suffix based on the version of VC
if(WINDOWS_STORE)
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc11_ui")
elseif(MSVC14)
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc14")
elseif(MSVC12)
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc12")
elseif(MSVC11)
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc11")
elseif(MSVC10)
set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc10")
endif()
# Add the library path search suffix for the VC independent version of TBB
list(APPEND TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc_mt")
elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin")
# OS X
set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb")
# TODO: Check to see which C++ library is being used by the compiler.
if(NOT ${CMAKE_SYSTEM_VERSION} VERSION_LESS 13.0)
# The default C++ library on OS X 10.9 and later is libc++
set(TBB_LIB_PATH_SUFFIX "lib/libc++" "lib")
else()
set(TBB_LIB_PATH_SUFFIX "lib")
endif()
elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux")
# Linux
set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb")
# TODO: Check the compiler version to see whether the suffix should be <arch>/gcc4.1 or
# <arch>/gcc4.4. For now, assume that the compiler is gcc 4.4.x or later.
if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
set(TBB_LIB_PATH_SUFFIX "lib/intel64/gcc4.4")
elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$")
set(TBB_LIB_PATH_SUFFIX "lib/ia32/gcc4.4")
endif()
endif()
##################################
# Find the TBB include dir
##################################
find_path(TBB_INCLUDE_DIRS tbb/tbb.h
HINTS ${TBB_INCLUDE_DIR} ${TBB_SEARCH_DIR}
PATHS ${TBB_DEFAULT_SEARCH_DIR}
PATH_SUFFIXES include)
##################################
# Set version strings
##################################
if(TBB_INCLUDE_DIRS)
file(READ "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h" _tbb_version_file)
string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1"
TBB_VERSION_MAJOR "${_tbb_version_file}")
string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1"
TBB_VERSION_MINOR "${_tbb_version_file}")
string(REGEX REPLACE ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1"
TBB_INTERFACE_VERSION "${_tbb_version_file}")
set(TBB_VERSION "${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}")
endif()
##################################
# Find TBB components
##################################
if(TBB_VERSION VERSION_LESS 4.3)
set(TBB_SEARCH_COMPONENTS tbb_preview tbbmalloc tbb)
else()
set(TBB_SEARCH_COMPONENTS tbb_preview tbbmalloc_proxy tbbmalloc tbb)
endif()
# Find each component
foreach(_comp ${TBB_SEARCH_COMPONENTS})
if(";${TBB_FIND_COMPONENTS};tbb;" MATCHES ";${_comp};")
# Search for the libraries
find_library(TBB_${_comp}_LIBRARY_RELEASE ${_comp}
HINTS ${TBB_LIBRARY} ${TBB_SEARCH_DIR}
PATHS ${TBB_DEFAULT_SEARCH_DIR} ENV LIBRARY_PATH
PATH_SUFFIXES ${TBB_LIB_PATH_SUFFIX})
find_library(TBB_${_comp}_LIBRARY_DEBUG ${_comp}_debug
HINTS ${TBB_LIBRARY} ${TBB_SEARCH_DIR}
PATHS ${TBB_DEFAULT_SEARCH_DIR} ENV LIBRARY_PATH
PATH_SUFFIXES ${TBB_LIB_PATH_SUFFIX})
if(TBB_${_comp}_LIBRARY_DEBUG)
list(APPEND TBB_LIBRARIES_DEBUG "${TBB_${_comp}_LIBRARY_DEBUG}")
endif()
if(TBB_${_comp}_LIBRARY_RELEASE)
list(APPEND TBB_LIBRARIES_RELEASE "${TBB_${_comp}_LIBRARY_RELEASE}")
endif()
if(TBB_${_comp}_LIBRARY_${TBB_BUILD_TYPE} AND NOT TBB_${_comp}_LIBRARY)
set(TBB_${_comp}_LIBRARY "${TBB_${_comp}_LIBRARY_${TBB_BUILD_TYPE}}")
endif()
if(TBB_${_comp}_LIBRARY AND EXISTS "${TBB_${_comp}_LIBRARY}")
set(TBB_${_comp}_FOUND TRUE)
else()
set(TBB_${_comp}_FOUND FALSE)
endif()
# Mark internal variables as advanced
mark_as_advanced(TBB_${_comp}_LIBRARY_RELEASE)
mark_as_advanced(TBB_${_comp}_LIBRARY_DEBUG)
mark_as_advanced(TBB_${_comp}_LIBRARY)
endif()
endforeach()
##################################
# Set compile flags and libraries
##################################
set(TBB_DEFINITIONS_RELEASE "")
set(TBB_DEFINITIONS_DEBUG "-DTBB_USE_DEBUG=1")
if(TBB_LIBRARIES_${TBB_BUILD_TYPE})
set(TBB_DEFINITIONS "${TBB_DEFINITIONS_${TBB_BUILD_TYPE}}")
set(TBB_LIBRARIES "${TBB_LIBRARIES_${TBB_BUILD_TYPE}}")
elseif(TBB_LIBRARIES_RELEASE)
set(TBB_DEFINITIONS "${TBB_DEFINITIONS_RELEASE}")
set(TBB_LIBRARIES "${TBB_LIBRARIES_RELEASE}")
elseif(TBB_LIBRARIES_DEBUG)
set(TBB_DEFINITIONS "${TBB_DEFINITIONS_DEBUG}")
set(TBB_LIBRARIES "${TBB_LIBRARIES_DEBUG}")
endif()
find_package_handle_standard_args(TBB
REQUIRED_VARS TBB_INCLUDE_DIRS TBB_LIBRARIES
HANDLE_COMPONENTS
VERSION_VAR TBB_VERSION)
##################################
# Create targets
##################################
if(NOT CMAKE_VERSION VERSION_LESS 3.0 AND TBB_FOUND)
add_library(tbb SHARED IMPORTED)
set_target_properties(tbb PROPERTIES
INTERFACE_INCLUDE_DIRECTORIES ${TBB_INCLUDE_DIRS}
IMPORTED_LOCATION ${TBB_LIBRARIES})
if(TBB_LIBRARIES_RELEASE AND TBB_LIBRARIES_DEBUG)
set_target_properties(tbb PROPERTIES
INTERFACE_COMPILE_DEFINITIONS "$<$<OR:$<CONFIG:Debug>,$<CONFIG:RelWithDebInfo>>:TBB_USE_DEBUG=1>"
IMPORTED_LOCATION_DEBUG ${TBB_LIBRARIES_DEBUG}
IMPORTED_LOCATION_RELWITHDEBINFO ${TBB_LIBRARIES_DEBUG}
IMPORTED_LOCATION_RELEASE ${TBB_LIBRARIES_RELEASE}
IMPORTED_LOCATION_MINSIZEREL ${TBB_LIBRARIES_RELEASE}
)
elseif(TBB_LIBRARIES_RELEASE)
set_target_properties(tbb PROPERTIES IMPORTED_LOCATION ${TBB_LIBRARIES_RELEASE})
else()
set_target_properties(tbb PROPERTIES
INTERFACE_COMPILE_DEFINITIONS "${TBB_DEFINITIONS_DEBUG}"
IMPORTED_LOCATION ${TBB_LIBRARIES_DEBUG}
)
endif()
endif()
mark_as_advanced(TBB_INCLUDE_DIRS TBB_LIBRARIES)
unset(TBB_ARCHITECTURE)
unset(TBB_BUILD_TYPE)
unset(TBB_LIB_PATH_SUFFIX)
unset(TBB_DEFAULT_SEARCH_DIR)
endif()
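For context, code built against the imported `tbb` target created by this module is ordinary TBB C++. The fragment below is a generic illustration of such a consumer; it is not part of the module, and it assumes TBB's standard `parallel_for` index-range overload.
```
// Generic TBB consumer (illustrative): scale a vector of delays in parallel.
#include <cstddef>
#include <vector>
#include <tbb/parallel_for.h>

int main() {
    std::vector<float> delays(1000, 1.0f);
    std::vector<float> scaled(delays.size());

    // The imported 'tbb' target supplies the include path and library for this call.
    tbb::parallel_for(std::size_t(0), delays.size(), [&](std::size_t i) {
        scaled[i] = 2.0f * delays[i];
    });

    return 0;
}
```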

@@ -0,0 +1,18 @@
#find_package(Doxygen)
#if(DOXYGEN_FOUND)
#set(doxyfile_in ${CMAKE_CURRENT_SOURCE_DIR}/doxyfile.in)
#set(doxyfile ${CMAKE_CURRENT_BINARY_DIR}/doxyfile)
#configure_file(${doxyfile_in} ${doxyfile} @ONLY)
#add_custom_target(doc
#COMMAND ${DOXYGEN_EXECUTABLE} ${doxyfile}
#WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}
#COMMENT "Generating API Documentation with Doxygen"
#VERBATIM)
#else()
#message(WARNING "Doxygen not found. Documentation will not be built")
#endif()

@@ -0,0 +1,106 @@
# Doxyfile 1.8.6
# This file describes the settings to be used by the documentation system
# doxygen (www.doxygen.org) for a project.
#
# All text after a double hash (##) is considered a comment and is placed in
# front of the TAG it is preceding.
#
# All text after a single hash (#) is considered a comment and will be ignored.
# The format is:
# TAG = value [value, ...]
# For lists, items can also be appended using:
# TAG += value [value, ...]
# Values that contain spaces should be placed between quotes (\" \").
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
# This tag specifies the encoding used for all characters in the config file
# that follow. The default is UTF-8 which is also the encoding used for all text
# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
# for the list of possible encodings.
# The default value is: UTF-8.
DOXYFILE_ENCODING = UTF-8
# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
# double-quotes, unless you are using Doxywizard) that should identify the
# project for which the documentation is generated. This name is used in the
# title of most generated pages and in a few other places.
# The default value is: My Project.
PROJECT_NAME = "@CMAKE_PROJECT_NAME@"
# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
# could be handy for archiving the generated documentation or if some version
# control system is used.
PROJECT_NUMBER =
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
# quick idea about the purpose of the project. Keep the description short.
PROJECT_BRIEF = "A Fast, Flexible Static Timing Analysis (STA) Engine for Digital Circuits"
# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
# Stripping is only done if one of the specified strings matches the left-hand
# part of the path. The tag can be used to show relative paths in the file list.
# If left blank the directory from which doxygen is run is used as the path to
# strip.
#
# Note that you can specify absolute paths here, but also relative paths, which
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
STRIP_FROM_PATH = @PROJECT_SOURCE_DIR@ @PROJECT_BINARY_DIR@
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
# The INPUT tag is used to specify the files and/or directories that contain
# documented source files. You may enter file names like myfile.cpp or
# directories like /usr/src/myproject. Separate the files or directories with
# spaces.
# Note: If this tag is empty the current directory is searched.
INPUT = @PROJECT_SOURCE_DIR@/src/libtatum @PROJECT_SOURCE_DIR@/README.md
# If the value of the INPUT tag contains directories, you can use the
# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
# *.h) to filter out the source-files in the directories. If left blank the
# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
# *.qsf, *.as and *.js.
FILE_PATTERNS = *.h *.hpp *.c *.cpp *.tpp *.inl
# The RECURSIVE tag can be used to specify whether or not subdirectories should
# be searched for input files as well.
# The default value is: NO.
RECURSIVE = YES
# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
# is part of the input, its contents will be placed on the main page
# (index.html). This can be useful if you have a project on for instance GitHub
# and want to reuse the introduction page also for the doxygen output.
USE_MDFILE_AS_MAINPAGE = README.md
# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
# http://www.mathjax.org) which uses client side Javascript for the rendering
# instead of using prerendered bitmaps. Use this if you do not have LaTeX
# installed or if you want the formulas to look prettier in the HTML output. When
# enabled you may also need to install MathJax separately and configure the path
# to it using the MATHJAX_RELPATH option.
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = YES

@@ -0,0 +1,87 @@
project("libtatum")
#
#
#Check for parallel execution framework support
#
#
set(TBB_SUPPORTED FALSE)
#Check for Intel Threading Building Blocks (TBB) support
find_package(TBB)
if (TBB_FOUND)
set(TBB_SUPPORTED TRUE)
endif()
#
#
# Determine parallel execution framework
#
#
set(TATUM_USE_EXECUTION_ENGINE "") #The actual execution engine to use (based on what is available)
if (TATUM_EXECUTION_ENGINE STREQUAL "auto")
#Pick the best supported execution engine
if (TBB_SUPPORTED)
set(TATUM_USE_EXECUTION_ENGINE "tbb")
else()
set(TATUM_USE_EXECUTION_ENGINE "serial")
endif()
else()
#The user requested a specific execution engine
if (TATUM_EXECUTION_ENGINE STREQUAL "tbb")
if (NOT TBB_SUPPORTED)
message(FATAL_ERROR "Tatum: Requested execution engine '${TATUM_EXECUTION_ENGINE}' not found")
endif()
elseif (TATUM_EXECUTION_ENGINE STREQUAL "serial")
#Pass
else()
message(FATAL_ERROR "Tatum: Unrecognized execution engine '${TATUM_EXECUTION_ENGINE}'")
endif()
#Set the engine to use (it must be valid or we would have errored out)
set(TATUM_USE_EXECUTION_ENGINE "${TATUM_EXECUTION_ENGINE}")
endif()
#
#
# Build files configuration
#
#
#Source files for the library
file(GLOB_RECURSE LIB_TATUM_SOURCES *.cpp)
file(GLOB_RECURSE LIB_TATUM_HEADERS *.hpp)
#Include directories
set(LIB_TATUM_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR})
#
#
# Define the actual build targets
#
#
#Define the library
add_library(libtatum STATIC ${LIB_TATUM_SOURCES} ${LIB_TATUM_HEADERS})
set_target_properties(libtatum PROPERTIES PREFIX "") #Avoid extra 'lib' prefix
#Export library headers
target_include_directories(libtatum PUBLIC ${LIB_TATUM_INCLUDE_DIRS})
#Setup parallel execution
if (TATUM_USE_EXECUTION_ENGINE STREQUAL "tbb")
message(STATUS "Tatum: will support parallel execution using '${TATUM_USE_EXECUTION_ENGINE}'")
target_compile_definitions(libtatum PUBLIC TATUM_USE_TBB)
target_link_libraries(libtatum tbb)
target_link_libraries(libtatum tbbmalloc_proxy) #Use the scalable memory allocator
elseif (TATUM_USE_EXECUTION_ENGINE STREQUAL "serial")
#Nothing to do
message(STATUS "Tatum: will support only serial execution")
else()
message(FATAL_ERROR "Tatum: Unrecognized concrete execution engine '${TATUM_USE_EXECUTION_ENGINE}'")
endif()
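When the `tbb` engine is selected, the rules above define `TATUM_USE_TBB` for `libtatum` and everything that links it. A consumer (or an internal graph walker) can then switch between serial and TBB execution at compile time; the snippet below illustrates that pattern only and is not taken from the library's sources.
```
// Illustrative pattern: parallel when TATUM_USE_TBB is defined by the build above, serial otherwise.
#include <vector>

#ifdef TATUM_USE_TBB
# include <tbb/parallel_for_each.h>
#endif

template<typename T, typename Func>
void for_each_element(std::vector<T>& elements, Func process) {
#ifdef TATUM_USE_TBB
    // TBB engine selected at configure time: distribute the work across CPU cores.
    tbb::parallel_for_each(elements.begin(), elements.end(), process);
#else
    // Serial engine: plain loop.
    for (T& element : elements) {
        process(element);
    }
#endif
}
```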

@@ -0,0 +1,18 @@
#ifndef TATUM_HPP
#define TATUM_HPP
#include "tatum_fwd.hpp"
//Data structures
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
//Analyzers
#include "tatum/timing_analyzers.hpp"
#include "tatum/graph_walkers.hpp"
#include "tatum/analyzer_factory.hpp"
//Reporting
#include "tatum/TimingReporter.hpp"
#endif

@@ -0,0 +1,72 @@
#pragma once
#include <memory>
#include <vector>
#include "tatum/tags/TimingTags.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
#include "tatum/graph_visitors/CommonAnalysisVisitor.hpp"
#include "tatum/graph_visitors/HoldAnalysisOps.hpp"
#include "tatum/util/tatum_assert.hpp"
namespace tatum {
/** \file
* The 'HoldAnalysis' class defines the operations needed by a timing analyzer class
* to perform a hold (min/shortest path) analysis.
*
* \see SetupAnalysis
* \see TimingAnalyzer
*
* Hold Analysis Principles
* ==========================
*
* In addition to satisfying setup constraints, data arriving at a Flip-Flop (FF) must stay (i.e.
* remain stable) for some amount of time *after* the capturing clock edge arrives. This time is
* referred to as the 'Hold Time' of the Flip-Flop, \f$ t_h \f$. If the data changes during the
* hold window (i.e. less than \f$ t_h \f$ after the capturing clock edge) then the FF may go
* meta-stable, failing to capture the data. This will put the circuit in an invalid state (this
* is bad).
*
* More formally, for correct operation at every cycle we require the following to be satisfied
* for every path in the circuit:
*
* \f[
* t_{clk\_insrt}^{(launch)} + t_{cq}^{(min)} + t_{comb}^{(min)} \geq t_{clk\_insrt}^{(capture)} + t_h (1)
* \f]
*
* where \f$ t_{clk\_insrt}^{(launch)}, t_{clk\_insrt}^{(capture)} \f$ are the up/downstream FF clock insertion
* delays, \f$ t_{cq}^{(min)} \f$ is the minimum clock-to-q delay of the upstream FF, \f$ t_{comb}^{(min)} \f$ is
* the minimum combinational path delay from the upstream to downstream FFs, and \f$ t_h \f$ is the hold
* constraint of the downstream FF.
*
* Note that unlike in setup analysis this behaviour is independent of the clock period.
* Intuitively, hold analysis can be viewed as data from the upstream FF trampling the data launched
* on the previous cycle before it can be captured by the downstream FF.
*/
/** \class HoldAnalysis
*
* The 'HoldAnalysis' class defines the operations needed by a timing analyzer
* to perform a hold (min/shortest path) analysis.
*
* \see SetupAnalysis
* \see TimingAnalyzer
* \see CommonAnalysisVisitor
*/
class HoldAnalysis : public detail::CommonAnalysisVisitor<detail::HoldAnalysisOps> {
public:
HoldAnalysis(size_t num_tags, size_t num_slacks)
: detail::CommonAnalysisVisitor<detail::HoldAnalysisOps>(num_tags, num_slacks) {}
TimingTags::tag_range hold_tags(const NodeId node) const { return ops_.get_tags(node); }
TimingTags::tag_range hold_tags(const NodeId node, TagType type) const { return ops_.get_tags(node, type); }
TimingTags::tag_range hold_edge_slacks(const EdgeId edge) const { return ops_.get_edge_slacks(edge); }
TimingTags::tag_range hold_node_slacks(const NodeId node) const { return ops_.get_node_slacks(node); }
};
} //namespace
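A minimal sketch of reading results back through the accessors declared above. It assumes a `HoldAnalysis` instance that has already been populated by one of the library's graph walkers, and that the returned `tag_range` is iterable; neither the walker nor the tag internals are shown in this header.
```
// Post-analysis query sketch (the analysis is assumed to have been run already).
#include <cstddef>
#include "HoldAnalysis.hpp"

std::size_t count_hold_slack_tags(const tatum::HoldAnalysis& hold_analysis, tatum::NodeId node) {
    std::size_t count = 0;
    // hold_node_slacks() exposes the hold slacks computed for 'node' during the
    // min-delay (shortest path) traversal, roughly one per constrained clock-domain pair.
    for (const auto& tag : hold_analysis.hold_node_slacks(node)) {
        (void) tag; // the tag contents are defined in tatum/tags/TimingTags.hpp
        ++count;
    }
    return count;
}
```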

@@ -0,0 +1,139 @@
#pragma once
#include <memory>
#include <vector>
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
#include "tatum/tags/TimingTags.hpp"
#include "tatum/graph_visitors/CommonAnalysisVisitor.hpp"
#include "tatum/graph_visitors/SetupAnalysisOps.hpp"
#include "tatum/util/tatum_assert.hpp"
namespace tatum {
/** \file
* The 'SetupAnalysis' class defines the operations needed by a GraphWalker class
* to perform a setup (max/longest path) analysis. It satisfies and extends the GraphVisitor
* concept class.
*
* Setup Analysis Principles
* ==========================
* To operate correctly, data arriving at a Flip-Flop (FF) must arrive (i.e. be stable) some
* amount of time BEFORE the capturing clock edge. This time is referred to as the
* 'Setup Time' of the Flip-Flop. If the data arrives during the setup window
* (i.e. less than \f$ t_s \f$ before the capturing clock edge) then the FF may go meta-stable,
* failing to capture the data. This will put the circuit in an invalid state (this is bad).
*
* More formally, for correct operation at every cycle we require the following to be satisfied
* for every path in the circuit:
*
* \f[
* t_{clock}^{(launch)} + t_{cq}^{(max)} + t_{comb}^{(max)} \leq t_{clock}^{(capture)} - t_s (1)
* \f]
*
* where \f$ t_{clock}^{(launch)} \f$ is the clock arrival time at the upstream FF, \f$ t_{cq}^{(max)} \f$ is the
* maximum clock-to-q delay of the upstream FF, \f$ t_{comb}^{(max)} \f$ is the maximum combinational
* path delay from the upstream to downstream FFs, \f$ t_s \f$ is the setup constraint of the downstream
* FF, and \f$ t_{clock}^{(capture)} \f$ is the clock arrival time at the downstream FF.
*
* Typically \f$ t_{clock}^{(launch)} \f$ and \f$ t_{clock}^{(capture)} \f$ have a periodic relationship.
* To ensure a non-optimistic analysis we need to consider the minimum possible time difference between
* \f$ t_{clock}^{(capture)} \f$ and \f$ t_{clock}^{(launch)} \f$. In the case where the launch and capture clocks
* are the same this *constraint* (\f$ T_{cstr} \f$) value is simply the clock period (\f$ T_{clk} \f$); however,
* in multi-clock scenarios the closest alignment of clock edges is used, which may be smaller than the clock
* period of either the launch or capture clock (depending on their period and phase relationship). It is
* typically assumed that the launch clock arrives at time zero (even if this is not strictly true
* in an absolute sense, such as if the clock has a rise time > 0, we can achieve this by adjusting
* the value of \f$ T_{cstr} \f$).
*
* Additionally, the arrival times of the launch and capture edges are unlikely to be perfectly
* aligned in practice, due to clock skew.
*
* Formally, we can re-write our condition for correct operation as:
* \f[
* t_{clk\_insrt}^{(launch)} + t_{cq}^{(max)} + t_{comb}^{(max)} \leq t_{clk\_insrt}^{(capture)} - t_s + T_{cstr} (2)
* \f]
*
* where \f$ t_{clk\_insrt}^{(launch)} \f$ and \f$ t_{clk\_insrt}^{(capture)} \f$ represent the clock insertion delays
* to the launch/capture nodes, and \f$ T_{cstr} \f$ the ideal constraint (excluding skew).
*
* We refer to the left hand side of (2) as the 'arrival time' (when the data actually arrives at a FF capture node),
* and the right hand side as the 'required time' (when the data is required to arrive for correct operation), so
* (2) becomes:
* \f[
* t_{arr}^{(max)} \leq t_{req}^{(min)} (3)
* \f]
*/
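/*
 * Setup slack (an addition for clarity; standard STA convention rather than something derived above):
 * the margin by which (3) is satisfied,
 * \f[
 * slack^{(setup)} = t_{req}^{(min)} - t_{arr}^{(max)}
 * \f]
 * Negative setup slack means the data arrives too late for the capturing clock edge; this is the
 * quantity reported by the setup_node_slacks()/setup_edge_slacks() accessors further below.
 */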
/**
* Setup Analysis Implementation
* ===============================
* When we perform setup analysis we follow the formulation of (2), by performing two key operations: traversing
* the clock network, and traversing the data paths.
*
* Clock Propagation
* -------------------
* We traverse the clock network to determine the clock delays (\f$ t_{clk\_insrt}^{(launch)} \f$, \f$ t_{clk\_insrt}^{(capture)} \f$)
* at each FF clock pin (FF_CLOCK node in the timing graph). Clock related delay information is stored and
* propagated as sets of 'clock tags'.
*
* Data Propagation
* ------------------
* We traverse the data paths in the circuit to determine \f$ t_{arr}^{(max)} \f$ in (2).
* In particular, at each node in the circuit we track the maximum arrival time to it as a set
* of 'data_tags'.
*
* The timing graph uses separate nodes to represent FF Pins (FF_IPIN, FF_OPIN) and FF Sources/Sinks
* (FF_SOURCE/FF_SINK). As a result \f$ t_{cq} \f$ delays are actually placed on the edges between FF_SOURCEs
* and FF_OPINs, \f$ t_s \f$ values are similarly placed as edge delays between FF_IPINs and FF_SINKs.
*
* The data launch nodes (e.g. FF_SOURCES) have their arrival times initialized to the clock insertion
* delay (\f$ t_{clk\_insrt}^{(launch)} \f$). Then at each downstream node we store the maximum of the upstream
* arrival time plus the incoming edge delay as the arrival time at each node. As a result the final
* arrival time at a capture node (e.g. FF_SINK) is the maximum arrival time (\f$ t_{arr}^{(max)} \f$).
*
*
* The required times at sink nodes (Primary Outputs, e.g. FF_SINKs) can be calculated directly after clock propagation,
* since the value of \f$ T_{cstr} \f$ is determined ahead of time.
*
* To facilitate the calculation of slack at each node we also propagate required times back through
* the timing graph. This follows a similar procedure to arrival propagation but happens in reverse
* order (from POs to PIs), with each node taking the minimum of the downstream required time minus
* the edge delay.
*
* Combined Clock & Data Propagation
* -----------------------------------
* In practice the clock and data propagation, although sometimes logically useful to think of as separate,
* are combined into a single traversal for efficiency (minimizing graph walks). This is enabled by
* building the timing graph with edges between FF_CLOCK and FF_SINK/FF_SOURCE nodes. On the forward traversal
* we propagate clock tags from known clock sources, which are converted to data tags (with appropriate
* *arrival times*) at FF_SOURCE nodes, and data tags (with appropriate *required times*) at FF_SINK nodes.
*
* \see HoldAnalysis
*/
/** \class SetupAnalysis
*
* The 'SetupAnalysis' class defines the operations needed by a timing analyzer
* to perform a setup (max/longest path) analysis.
*
* \see HoldAnalysis
* \see TimingAnalyzer
* \see CommonAnalysisVisitor
*/
class SetupAnalysis : public detail::CommonAnalysisVisitor<detail::SetupAnalysisOps> {
public:
SetupAnalysis(size_t num_tags, size_t num_slacks)
: detail::CommonAnalysisVisitor<detail::SetupAnalysisOps>(num_tags, num_slacks) {}
TimingTags::tag_range setup_tags(const NodeId node) const { return ops_.get_tags(node); }
TimingTags::tag_range setup_tags(const NodeId node, TagType type) const { return ops_.get_tags(node, type); }
TimingTags::tag_range setup_edge_slacks(const EdgeId edge) const { return ops_.get_edge_slacks(edge); }
TimingTags::tag_range setup_node_slacks(const NodeId node) const { return ops_.get_node_slacks(node); }
};
} //namespace

@@ -0,0 +1,82 @@
#pragma once
#include "SetupAnalysis.hpp"
#include "HoldAnalysis.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
#include "tatum/graph_visitors/GraphVisitor.hpp"
namespace tatum {
/** \class SetupHoldAnalysis
*
* The SetupHoldAnalysis class defines the operations needed by a timing analyzer
* to perform a combined setup (max/longest path) and hold (min/shortest path) analysis.
*
* Performing both analyses simultaneously tends to be more efficient than performing
* them separately due to cache locality.
*
* \see SetupAnalysis
* \see HoldAnalysis
* \see TimingAnalyzer
*/
class SetupHoldAnalysis : public GraphVisitor {
public:
SetupHoldAnalysis(size_t num_tags, size_t num_slacks)
: setup_visitor_(num_tags, num_slacks)
, hold_visitor_(num_tags, num_slacks) {}
void do_reset_node(const NodeId node_id) override {
setup_visitor_.do_reset_node(node_id);
hold_visitor_.do_reset_node(node_id);
}
void do_reset_edge(const EdgeId edge_id) override {
setup_visitor_.do_reset_edge(edge_id);
hold_visitor_.do_reset_edge(edge_id);
}
bool do_arrival_pre_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const NodeId node_id) override {
bool setup_unconstrained = setup_visitor_.do_arrival_pre_traverse_node(tg, tc, node_id);
bool hold_unconstrained = hold_visitor_.do_arrival_pre_traverse_node(tg, tc, node_id);
return setup_unconstrained || hold_unconstrained;
}
bool do_required_pre_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const NodeId node_id) override {
bool setup_unconstrained = setup_visitor_.do_required_pre_traverse_node(tg, tc, node_id);
bool hold_unconstrained = hold_visitor_.do_required_pre_traverse_node(tg, tc, node_id);
return setup_unconstrained || hold_unconstrained;
}
void do_arrival_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id) override {
setup_visitor_.do_arrival_traverse_node(tg, tc, dc, node_id);
hold_visitor_.do_arrival_traverse_node(tg, tc, dc, node_id);
}
void do_required_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id) override {
setup_visitor_.do_required_traverse_node(tg, tc, dc, node_id);
hold_visitor_.do_required_traverse_node(tg, tc, dc, node_id);
}
void do_slack_traverse_node(const TimingGraph& tg, const DelayCalculator& dc, const NodeId node) override {
setup_visitor_.do_slack_traverse_node(tg, dc, node);
hold_visitor_.do_slack_traverse_node(tg, dc, node);
}
TimingTags::tag_range setup_tags(const NodeId node_id) const { return setup_visitor_.setup_tags(node_id); }
TimingTags::tag_range setup_tags(const NodeId node_id, TagType type) const { return setup_visitor_.setup_tags(node_id, type); }
TimingTags::tag_range setup_edge_slacks(const EdgeId edge_id) const { return setup_visitor_.setup_edge_slacks(edge_id); }
TimingTags::tag_range setup_node_slacks(const NodeId node_id) const { return setup_visitor_.setup_node_slacks(node_id); }
TimingTags::tag_range hold_tags(const NodeId node_id) const { return hold_visitor_.hold_tags(node_id); }
TimingTags::tag_range hold_tags(const NodeId node_id, TagType type) const { return hold_visitor_.hold_tags(node_id, type); }
TimingTags::tag_range hold_edge_slacks(const EdgeId edge_id) const { return hold_visitor_.hold_edge_slacks(edge_id); }
TimingTags::tag_range hold_node_slacks(const NodeId node_id) const { return hold_visitor_.hold_node_slacks(node_id); }
SetupAnalysis& setup_visitor() { return setup_visitor_; }
HoldAnalysis& hold_visitor() { return hold_visitor_; }
private:
SetupAnalysis setup_visitor_;
HoldAnalysis hold_visitor_;
};
} //namespace
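A short sketch of how the combined visitor is queried; it uses only the constructor and accessors shown above and assumes the traversals have already been driven by a graph walker elsewhere in the library.
```
// Illustrative only: one visitor answers both setup and hold queries.
#include "SetupHoldAnalysis.hpp"

void inspect_node(const tatum::SetupHoldAnalysis& analysis, tatum::NodeId node) {
    auto setup_slacks = analysis.setup_node_slacks(node); // produced by the max-delay traversal
    auto hold_slacks  = analysis.hold_node_slacks(node);  // produced by the min-delay traversal
    (void) setup_slacks;
    (void) hold_slacks;
}
```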

@@ -0,0 +1,79 @@
#pragma once
#include <cmath>
#include <array>
#include <iosfwd>
#ifndef TIME_VEC_WIDTH
#define TIME_VEC_WIDTH 1
#endif
/*
* What alignment is required?
*/
#if TIME_VEC_WIDTH > 8
//Required for aligned access with AVX
# define TIME_MEM_ALIGN 8*sizeof(float)
#elif TIME_VEC_WIDTH >= 4
//Required for aligned access with SSE
# define TIME_MEM_ALIGN 4*sizeof(float)
#endif //TIME_VEC_WIDTH
#if TIME_VEC_WIDTH > 1
# include <x86intrin.h>
#endif
namespace tatum {
class Time {
public:
typedef float scalar_type;
public: //Constructors
Time(): Time(NAN) {}
///Initialize from float types
explicit Time(const double time) { set_value(time); }
public: //Accessors
///The current time value
scalar_type value() const;
///Indicates whether the current time value is valid
bool valid() const;
///Updates the time value with the max of itself and other
void max(const Time& other);
///Updates the time value with the min of itself and other
void min(const Time& other);
///Allow conversions to scalar_type (usually float)
operator scalar_type() const { return value(); }
public: //Mutators
///Set the current time value to time
void set_value(scalar_type time);
Time& operator+=(const Time& rhs);
Time& operator-=(const Time& rhs);
friend bool operator==(const Time lhs, const Time rhs);
friend bool operator<(const Time lhs, const Time rhs);
friend bool operator>(const Time lhs, const Time rhs);
friend Time operator-(const Time val);
friend Time operator+(const Time val);
private:
#if TIME_VEC_WIDTH > 1
alignas(TIME_MEM_ALIGN) std::array<scalar_type, TIME_VEC_WIDTH> time_;
#else
scalar_type time_;
#endif
};
} //namespace
#include "Time.inl"

@@ -0,0 +1,128 @@
#include <iostream>
#include <string>
#include "Time.hpp"
namespace tatum {
/*
* Class members
*/
#if TIME_VEC_WIDTH > 1
/*
* Serial / inferred SIMD
*/
inline void Time::set_value(scalar_type time) {
for(size_t i = 0; i < time_.size(); i++) {
time_[i] = time;
}
}
inline void Time::max(const Time& other) {
for(size_t i = 0; i < time_.size(); i++) {
//Use conditional so compiler will vectorize
time_[i] = (time_[i] > other.time_[i]) ? time_[i] : other.time_[i];
}
}
inline void Time::min(const Time& other) {
for(size_t i = 0; i < time_.size(); i++) {
//Use conditional so compiler will vectorize
time_[i] = (time_[i] < other.time_[i]) ? time_[i] : other.time_[i];
}
}
inline Time& Time::operator+=(const Time& rhs) {
for(size_t i = 0; i < time_.size(); i++) {
time_[i] += rhs.time_[i];
}
return *this;
}
inline Time& Time::operator-=(const Time& rhs) {
for(size_t i = 0; i < time_.size(); i++) {
time_[i] -= rhs.time_[i];
}
return *this;
}
inline Time::scalar_type Time::value() const { return time_[0]; }
inline bool Time::valid() const {
//This is a reduction with a function call inside,
//so we can't vectorize easily
bool result = true;
for(size_t i = 0; i < time_.size(); i++) {
result &= !std::isnan(time_[i]);
}
return result;
}
#else //Scalar case (TIME_VEC_WIDTH == 1)
inline Time::scalar_type Time::value() const { return time_; }
inline void Time::set_value(scalar_type time) { time_ = time; }
inline bool Time::valid() const { return !std::isnan(time_); }
inline void Time::max(const Time& other) { time_ = std::max(time_, other.time_); }
inline void Time::min(const Time& other) { time_ = std::min(time_, other.time_); }
inline Time& Time::operator+=(const Time& rhs) { time_ += rhs.time_; return *this; }
inline Time& Time::operator-=(const Time& rhs) { time_ -= rhs.time_; return *this; }
#endif //TIME_VEC_WIDTH
/*
* External functions
*/
#if TIME_VEC_WIDTH > 1
inline Time operator-(Time in) {
for(size_t i = 0; i < in.time_.size(); i++) {
in.time_[i] = -in.time_[i];
}
return in;
}
inline Time operator+(Time in) {
for(size_t i = 0; i < in.time_.size(); i++) {
in.time_[i] = +in.time_[i];
}
return in;
}
#else //Scalar case (TIME_VEC_WIDTH == 1)
inline bool operator==(const Time lhs, const Time rhs) {
return lhs.time_ == rhs.time_;
}
inline bool operator<(const Time lhs, const Time rhs) {
return lhs.time_ < rhs.time_;
}
inline bool operator>(const Time lhs, const Time rhs) {
return lhs.time_ > rhs.time_;
}
inline Time operator-(Time in) {
in.time_ = -in.time_;
return in;
}
inline Time operator+(Time in) {
in.time_ = +in.time_;
return in;
}
#endif //TIME_VEC_WIDTH
inline Time operator+(Time lhs, const Time& rhs) {
return lhs += rhs;
}
inline Time operator-(Time lhs, const Time& rhs) {
return lhs -= rhs;
}
inline std::ostream& operator<<(std::ostream& os, const Time& time) {
os << time.value();
return os;
}
} //namespace

@@ -0,0 +1,571 @@
#include <iostream>
#include <limits>
#include "tatum/util/tatum_assert.hpp"
#include "tatum/TimingConstraints.hpp"
using std::cout;
using std::endl;
namespace tatum {
TimingConstraints::domain_range TimingConstraints::clock_domains() const {
return tatum::util::make_range(domain_ids_.begin(), domain_ids_.end());
}
std::string TimingConstraints::clock_domain_name(const DomainId id) const {
if(!id) {
return std::string("*");
}
return domain_names_[id];
}
NodeId TimingConstraints::clock_domain_source_node(const DomainId id) const {
return domain_sources_[id];
}
bool TimingConstraints::is_virtual_clock(const DomainId id) const {
//No source node indicates a virtual clock
return !bool(clock_domain_source_node(id));
}
DomainId TimingConstraints::node_clock_domain(const NodeId id) const {
//This is currently a linear search through all clock sources and
//I/O constraints, could be made more efficient but it is only called
//rarely (i.e. during pre-traversals)
//Is it a clock source?
DomainId source_domain = find_node_source_clock_domain(id);
if(source_domain) return source_domain;
//Does it have an input constraint?
for(DelayType delay_type : {DelayType::MAX, DelayType::MIN}) {
for(auto kv : input_constraints(delay_type)) {
auto node_id = kv.first;
auto domain_id = kv.second.domain;
//TODO: Assumes a single clock per node
if(node_id == id) return domain_id;
}
}
//Does it have an output constraint?
for(DelayType delay_type : {DelayType::MAX, DelayType::MIN}) {
for(auto kv : output_constraints(delay_type)) {
auto node_id = kv.first;
auto domain_id = kv.second.domain;
//TODO: Assumes a single clock per node
if(node_id == id) return domain_id;
}
}
//None found
return DomainId::INVALID();
}
bool TimingConstraints::node_is_clock_source(const NodeId id) const {
//Returns a DomainId which converts to true if valid
return bool(find_node_source_clock_domain(id));
}
bool TimingConstraints::node_is_constant_generator(const NodeId id) const {
return constant_generators_.count(id);
}
DomainId TimingConstraints::find_node_source_clock_domain(const NodeId node_id) const {
//We don't expect many clocks, so the linear search should be fine
for(auto domain_id : clock_domains()) {
if(clock_domain_source_node(domain_id) == node_id) {
return domain_id;
}
}
return DomainId::INVALID();
}
DomainId TimingConstraints::find_clock_domain(const std::string& name) const {
//Linear search for name
// We don't expect a large number of domains
for(DomainId id : clock_domains()) {
if(clock_domain_name(id) == name) {
return id;
}
}
//Not found
return DomainId::INVALID();
}
bool TimingConstraints::should_analyze(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node) const {
TATUM_ASSERT(src_domain);
TATUM_ASSERT(sink_domain);
//If there is a domain pair + capture node or domain pair constraint then it should be analyzed
return setup_constraints_.count(NodeDomainPair(src_domain, sink_domain, capture_node))
|| setup_constraints_.count(NodeDomainPair(src_domain, sink_domain, NodeId::INVALID()))
|| hold_constraints_.count(NodeDomainPair(src_domain, sink_domain, capture_node))
|| hold_constraints_.count(NodeDomainPair(src_domain, sink_domain, NodeId::INVALID()));
}
Time TimingConstraints::hold_constraint(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node) const {
//Try to find the capture node-specific constraint
auto iter = hold_constraints_.find(NodeDomainPair(src_domain, sink_domain, capture_node));
if(iter != hold_constraints_.end()) {
return iter->second;
}
//If no capture node specific constraint was found, fall back to the domain pair constraint
iter = hold_constraints_.find(NodeDomainPair(src_domain, sink_domain, NodeId::INVALID()));
if(iter != hold_constraints_.end()) {
return iter->second;
}
//No constraint found
return std::numeric_limits<Time>::quiet_NaN();
}
Time TimingConstraints::setup_constraint(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node) const {
//Try to find the capture node-specific constraint
auto iter = setup_constraints_.find(NodeDomainPair(src_domain, sink_domain, capture_node));
if(iter != setup_constraints_.end()) {
return iter->second;
}
//If no capture node specific constraint was found, fall back to the domain pair constraint
iter = setup_constraints_.find(NodeDomainPair(src_domain, sink_domain, NodeId::INVALID()));
if(iter != setup_constraints_.end()) {
return iter->second;
}
//No constraint found
return std::numeric_limits<Time>::quiet_NaN();
}
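//Illustrative example (not part of the original source): given
//  set_setup_constraint(clkA, clkB, Time(2.0))             - domain-pair default
//  set_setup_constraint(clkA, clkB, sink_node, Time(1.5))  - capture-node override
//a call to setup_constraint(clkA, clkB, sink_node) returns the node-specific 1.5, while any
//other capture node falls back to the domain-pair value 2.0, matching the lookup order above.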
Time TimingConstraints::setup_clock_uncertainty(const DomainId src_domain, const DomainId sink_domain) const {
auto iter = setup_clock_uncertainties_.find(DomainPair(src_domain, sink_domain));
if(iter == setup_clock_uncertainties_.end()) {
return Time(0.); //Defaults to zero if unspecified
}
return iter->second;
}
Time TimingConstraints::hold_clock_uncertainty(const DomainId src_domain, const DomainId sink_domain) const {
auto iter = hold_clock_uncertainties_.find(DomainPair(src_domain, sink_domain));
if(iter == hold_clock_uncertainties_.end()) {
return Time(0.); //Defaults to zero if unspecified
}
return iter->second;
}
Time TimingConstraints::input_constraint(const NodeId node_id, const DomainId domain_id, const DelayType delay_type) const {
if (delay_type == DelayType::MAX) {
auto iter = find_io_constraint(node_id, domain_id, max_input_constraints_);
if(iter != max_input_constraints_.end()) {
return iter->second.constraint;
}
} else {
TATUM_ASSERT(delay_type == DelayType::MIN);
auto iter = find_io_constraint(node_id, domain_id, min_input_constraints_);
if(iter != min_input_constraints_.end()) {
return iter->second.constraint;
}
}
return std::numeric_limits<Time>::quiet_NaN();
}
Time TimingConstraints::output_constraint(const NodeId node_id, const DomainId domain_id, const DelayType delay_type) const {
if (delay_type == DelayType::MAX) {
auto iter = find_io_constraint(node_id, domain_id, max_output_constraints_);
if(iter != max_output_constraints_.end()) {
return iter->second.constraint;
}
} else {
TATUM_ASSERT(delay_type == DelayType::MIN);
auto iter = find_io_constraint(node_id, domain_id, min_output_constraints_);
if(iter != min_output_constraints_.end()) {
return iter->second.constraint;
}
}
return std::numeric_limits<Time>::quiet_NaN();
}
Time TimingConstraints::source_latency(const DomainId domain, const ArrivalType arrival_type) const {
if (arrival_type == ArrivalType::EARLY) {
auto iter = source_latencies_early_.find(domain);
if(iter == source_latencies_early_.end()) {
return Time(0.); //Defaults to zero if unspecified
}
return iter->second;
} else {
TATUM_ASSERT(arrival_type == ArrivalType::LATE);
auto iter = source_latencies_late_.find(domain);
if(iter == source_latencies_late_.end()) {
return Time(0.); //Defaults to zero if unspecified
}
return iter->second;
}
}
TimingConstraints::constant_generator_range TimingConstraints::constant_generators() const {
return tatum::util::make_range(constant_generators_.begin(), constant_generators_.end());
}
TimingConstraints::clock_constraint_range TimingConstraints::setup_constraints() const {
return tatum::util::make_range(setup_constraints_.begin(), setup_constraints_.end());
}
TimingConstraints::clock_constraint_range TimingConstraints::hold_constraints() const {
return tatum::util::make_range(hold_constraints_.begin(), hold_constraints_.end());
}
TimingConstraints::clock_uncertainty_range TimingConstraints::setup_clock_uncertainties() const {
return tatum::util::make_range(setup_clock_uncertainties_.begin(), setup_clock_uncertainties_.end());
}
TimingConstraints::clock_uncertainty_range TimingConstraints::hold_clock_uncertainties() const {
return tatum::util::make_range(hold_clock_uncertainties_.begin(), hold_clock_uncertainties_.end());
}
TimingConstraints::io_constraint_range TimingConstraints::input_constraints(const DelayType delay_type) const {
if (delay_type == DelayType::MAX) {
return tatum::util::make_range(max_input_constraints_.begin(), max_input_constraints_.end());
} else {
TATUM_ASSERT(delay_type == DelayType::MIN);
return tatum::util::make_range(min_input_constraints_.begin(), min_input_constraints_.end());
}
}
TimingConstraints::io_constraint_range TimingConstraints::output_constraints(const DelayType delay_type) const {
if (delay_type == DelayType::MAX) {
return tatum::util::make_range(max_output_constraints_.begin(), max_output_constraints_.end());
} else {
TATUM_ASSERT(delay_type == DelayType::MIN);
return tatum::util::make_range(min_output_constraints_.begin(), min_output_constraints_.end());
}
}
TimingConstraints::io_constraint_range TimingConstraints::input_constraints(const NodeId id, const DelayType delay_type) const {
if (delay_type == DelayType::MAX) {
auto range = max_input_constraints_.equal_range(id);
return tatum::util::make_range(range.first, range.second);
} else {
TATUM_ASSERT(delay_type == DelayType::MIN);
auto range = min_input_constraints_.equal_range(id);
return tatum::util::make_range(range.first, range.second);
}
}
TimingConstraints::io_constraint_range TimingConstraints::output_constraints(const NodeId id, const DelayType delay_type) const {
if (delay_type == DelayType::MAX) {
auto range = max_output_constraints_.equal_range(id);
return tatum::util::make_range(range.first, range.second);
} else {
TATUM_ASSERT(delay_type == DelayType::MIN);
auto range = min_output_constraints_.equal_range(id);
return tatum::util::make_range(range.first, range.second);
}
}
TimingConstraints::source_latency_range TimingConstraints::source_latencies(ArrivalType arrival_type) const {
if (arrival_type == ArrivalType::EARLY) {
return tatum::util::make_range(source_latencies_early_.begin(), source_latencies_early_.end());
} else {
TATUM_ASSERT(arrival_type == ArrivalType::LATE);
return tatum::util::make_range(source_latencies_late_.begin(), source_latencies_late_.end());
}
}
DomainId TimingConstraints::create_clock_domain(const std::string name) {
DomainId id = find_clock_domain(name);
if(!id) {
//Create it
id = DomainId(domain_ids_.size());
domain_ids_.push_back(id);
domain_names_.push_back(name);
domain_sources_.emplace_back(NodeId::INVALID());
TATUM_ASSERT(clock_domain_name(id) == name);
TATUM_ASSERT(find_clock_domain(name) == id);
}
return id;
}
void TimingConstraints::set_setup_constraint(const DomainId src_domain, const DomainId sink_domain, const Time constraint) {
set_setup_constraint(src_domain, sink_domain, NodeId::INVALID(), constraint);
}
void TimingConstraints::set_setup_constraint(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node, const Time constraint) {
auto key = NodeDomainPair(src_domain, sink_domain, capture_node);
setup_constraints_[key] = constraint;
}
void TimingConstraints::set_hold_constraint(const DomainId src_domain, const DomainId sink_domain, const Time constraint) {
set_hold_constraint(src_domain, sink_domain, NodeId::INVALID(), constraint);
}
void TimingConstraints::set_hold_constraint(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node, const Time constraint) {
auto key = NodeDomainPair(src_domain, sink_domain, capture_node);
hold_constraints_[key] = constraint;
}
void TimingConstraints::set_setup_clock_uncertainty(const DomainId src_domain, const DomainId sink_domain, const Time uncertainty) {
auto key = DomainPair(src_domain, sink_domain);
setup_clock_uncertainties_[key] = uncertainty;
}
void TimingConstraints::set_hold_clock_uncertainty(const DomainId src_domain, const DomainId sink_domain, const Time uncertainty) {
auto key = DomainPair(src_domain, sink_domain);
hold_clock_uncertainties_[key] = uncertainty;
}
void TimingConstraints::set_input_constraint(const NodeId node_id, const DomainId domain_id, const DelayType delay_type, const Time constraint) {
if (delay_type == DelayType::MAX) {
auto iter = find_io_constraint(node_id, domain_id, max_input_constraints_);
if(iter != max_input_constraints_.end()) {
//Found, update
iter->second.constraint = constraint;
} else {
//Not found, create it
max_input_constraints_.insert(std::make_pair(node_id, IoConstraint(domain_id, constraint)));
}
} else {
TATUM_ASSERT(delay_type == DelayType::MIN);
auto iter = find_io_constraint(node_id, domain_id, min_input_constraints_);
if(iter != min_input_constraints_.end()) {
//Found, update
iter->second.constraint = constraint;
} else {
//Not found, create it
min_input_constraints_.insert(std::make_pair(node_id, IoConstraint(domain_id, constraint)));
}
}
}
void TimingConstraints::set_output_constraint(const NodeId node_id, const DomainId domain_id, const DelayType delay_type, const Time constraint) {
if (delay_type == DelayType::MAX) {
auto iter = find_io_constraint(node_id, domain_id, max_output_constraints_);
if(iter != max_output_constraints_.end()) {
//Found, update
iter->second.constraint = constraint;
} else {
//Not found, create it
max_output_constraints_.insert(std::make_pair(node_id, IoConstraint(domain_id, constraint)));
}
} else {
TATUM_ASSERT(delay_type == DelayType::MIN);
auto iter = find_io_constraint(node_id, domain_id, min_output_constraints_);
if(iter != min_output_constraints_.end()) {
//Found, update
iter->second.constraint = constraint;
} else {
//Not found, create it
min_output_constraints_.insert(std::make_pair(node_id, IoConstraint(domain_id, constraint)));
}
}
}
void TimingConstraints::set_source_latency(const DomainId domain, const ArrivalType arrival_type, const Time latency) {
if (arrival_type == ArrivalType::EARLY) {
source_latencies_early_[domain] = latency;
} else {
TATUM_ASSERT(arrival_type == ArrivalType::LATE);
source_latencies_late_[domain] = latency;
}
}
void TimingConstraints::set_clock_domain_source(const NodeId node_id, const DomainId domain_id) {
domain_sources_[domain_id] = node_id;
}
void TimingConstraints::set_constant_generator(const NodeId node_id, bool is_constant_generator) {
if(is_constant_generator) {
constant_generators_.insert(node_id);
} else {
constant_generators_.erase(node_id);
}
}
void TimingConstraints::remap_nodes(const tatum::util::linear_map<NodeId,NodeId>& node_map) {
//Domain Sources
tatum::util::linear_map<DomainId,NodeId> remapped_domain_sources(domain_sources_.size());
for(size_t domain_idx = 0; domain_idx < domain_sources_.size(); ++domain_idx) {
DomainId domain_id(domain_idx);
NodeId old_node_id = domain_sources_[domain_id];
if(old_node_id) {
remapped_domain_sources[domain_id] = node_map[old_node_id];
}
}
domain_sources_ = std::move(remapped_domain_sources);
//Constant generators
std::unordered_set<NodeId> remapped_constant_generators;
for(NodeId node_id : constant_generators_) {
remapped_constant_generators.insert(node_map[node_id]);
}
constant_generators_ = std::move(remapped_constant_generators);
//Max Input Constraints
std::multimap<NodeId,IoConstraint> remapped_max_input_constraints;
for(auto kv : max_input_constraints_) {
NodeId new_node_id = node_map[kv.first];
remapped_max_input_constraints.insert(std::make_pair(new_node_id, kv.second));
}
max_input_constraints_ = std::move(remapped_max_input_constraints);
//Min Input Constraints
std::multimap<NodeId,IoConstraint> remapped_min_input_constraints;
for(auto kv : min_input_constraints_) {
NodeId new_node_id = node_map[kv.first];
remapped_min_input_constraints.insert(std::make_pair(new_node_id, kv.second));
}
min_input_constraints_ = std::move(remapped_min_input_constraints);
//Max Output Constraints
std::multimap<NodeId,IoConstraint> remapped_max_output_constraints;
for(auto kv : max_output_constraints_) {
NodeId new_node_id = node_map[kv.first];
remapped_max_output_constraints.insert(std::make_pair(new_node_id, kv.second));
}
max_output_constraints_ = std::move(remapped_max_output_constraints);
//Min Output Constraints
std::multimap<NodeId,IoConstraint> remapped_min_output_constraints;
for(auto kv : min_output_constraints_) {
NodeId new_node_id = node_map[kv.first];
remapped_min_output_constraints.insert(std::make_pair(new_node_id, kv.second));
}
min_output_constraints_ = std::move(remapped_min_output_constraints);
}
void TimingConstraints::print_constraints() const {
cout << "Setup Clock Constraints" << endl;
for(auto kv : setup_constraints()) {
auto key = kv.first;
Time constraint = kv.second;
cout << "SRC: " << key.domain_pair.src_domain_id;
cout << " SINK: " << key.domain_pair.sink_domain_id;
cout << " CAPTURE_NODE: " << key.capture_node;
cout << " Constraint: " << constraint;
cout << endl;
}
cout << "Hold Clock Constraints" << endl;
for(auto kv : hold_constraints()) {
auto key = kv.first;
Time constraint = kv.second;
cout << "SRC: " << key.domain_pair.src_domain_id;
cout << " SINK: " << key.domain_pair.sink_domain_id;
cout << " CAPTURE_NODE: " << key.capture_node;
cout << " Constraint: " << constraint;
cout << endl;
}
cout << "Max Input Constraints" << endl;
for(auto kv : input_constraints(DelayType::MAX)) {
auto node_id = kv.first;
auto io_constraint = kv.second;
cout << "Node: " << node_id;
cout << " Domain: " << io_constraint.domain;
cout << " Constraint: " << io_constraint.constraint;
cout << endl;
}
cout << "Min Input Constraints" << endl;
for(auto kv : input_constraints(DelayType::MIN)) {
auto node_id = kv.first;
auto io_constraint = kv.second;
cout << "Node: " << node_id;
cout << " Domain: " << io_constraint.domain;
cout << " Constraint: " << io_constraint.constraint;
cout << endl;
}
cout << "Max Output Constraints" << endl;
for(auto kv : output_constraints(DelayType::MAX)) {
auto node_id = kv.first;
auto io_constraint = kv.second;
cout << "Node: " << node_id;
cout << " Domain: " << io_constraint.domain;
cout << " Constraint: " << io_constraint.constraint;
cout << endl;
}
cout << "Min Output Constraints" << endl;
for(auto kv : output_constraints(DelayType::MIN)) {
auto node_id = kv.first;
auto io_constraint = kv.second;
cout << "Node: " << node_id;
cout << " Domain: " << io_constraint.domain;
cout << " Constraint: " << io_constraint.constraint;
cout << endl;
}
cout << "Setup Clock Uncertainty" << endl;
for(auto kv : setup_clock_uncertainties()) {
auto key = kv.first;
Time uncertainty = kv.second;
cout << "SRC: " << key.src_domain_id;
cout << " SINK: " << key.sink_domain_id;
cout << " Uncertainty: " << uncertainty;
cout << endl;
}
cout << "Hold Clock Uncertainty" << endl;
for(auto kv : hold_clock_uncertainties()) {
auto key = kv.first;
Time uncertainty = kv.second;
cout << "SRC: " << key.src_domain_id;
cout << " SINK: " << key.sink_domain_id;
cout << " Uncertainty: " << uncertainty;
cout << endl;
}
cout << "Early Source Latency" << endl;
for(auto kv : source_latencies(ArrivalType::EARLY)) {
auto domain = kv.first;
Time latency = kv.second;
cout << "Domain: " << domain;
cout << " Latency: " << latency;
cout << endl;
}
cout << "Late Source Latency" << endl;
for(auto kv : source_latencies(ArrivalType::LATE)) {
auto domain = kv.first;
Time latency = kv.second;
cout << "Domain: " << domain;
cout << " Latency: " << latency;
cout << endl;
}
}
TimingConstraints::io_constraint_iterator TimingConstraints::find_io_constraint(const NodeId node_id, const DomainId domain_id, const std::multimap<NodeId,IoConstraint>& io_constraints) const {
auto range = io_constraints.equal_range(node_id);
for(auto iter = range.first; iter != range.second; ++iter) {
if(iter->second.domain == domain_id) return iter;
}
//Not found
return io_constraints.end();
}
TimingConstraints::mutable_io_constraint_iterator TimingConstraints::find_io_constraint(const NodeId node_id, const DomainId domain_id, std::multimap<NodeId,IoConstraint>& io_constraints) {
auto range = io_constraints.equal_range(node_id);
for(auto iter = range.first; iter != range.second; ++iter) {
if(iter->second.domain == domain_id) return iter;
}
//Not found
return io_constraints.end();
}
} //namespace

View File

@ -0,0 +1,229 @@
#pragma once
#include <map>
#include <vector>
#include <unordered_set>
#include "tatum/util/tatum_linear_map.hpp"
#include "tatum/util/tatum_range.hpp"
#include "tatum/base/ArrivalType.hpp"
#include "tatum/base/DelayType.hpp"
#include "tatum/TimingConstraintsFwd.hpp"
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/Time.hpp"
namespace tatum {
/**
* The TimingConstraints class stores all the timing constraints applied during timing analysis.
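 *
 * A minimal usage sketch (the names 'tc' and 'clk', and the 10ns value, are
 * illustrative assumptions rather than anything prescribed by the library):
 *
 *      TimingConstraints tc;
 *      DomainId clk = tc.create_clock_domain("clk");
 *      tc.set_setup_constraint(clk, clk, Time(10.)); //Use the clock period as the setup constraint
 *      Time setup = tc.setup_constraint(clk, clk);   //Query it back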
*/
class TimingConstraints {
public: //Types
typedef tatum::util::linear_map<DomainId,DomainId>::const_iterator domain_iterator;
typedef std::map<NodeDomainPair,Time>::const_iterator clock_constraint_iterator;
typedef std::map<DomainPair,Time>::const_iterator clock_uncertainty_iterator;
typedef std::multimap<NodeId,IoConstraint>::const_iterator io_constraint_iterator;
typedef std::map<DomainId,Time>::const_iterator source_latency_iterator;
typedef std::unordered_set<NodeId>::const_iterator constant_generator_iterator;
typedef tatum::util::Range<domain_iterator> domain_range;
typedef tatum::util::Range<clock_constraint_iterator> clock_constraint_range;
typedef tatum::util::Range<clock_uncertainty_iterator> clock_uncertainty_range;
typedef tatum::util::Range<io_constraint_iterator> io_constraint_range;
typedef tatum::util::Range<source_latency_iterator> source_latency_range;
typedef tatum::util::Range<constant_generator_iterator> constant_generator_range;
public: //Accessors
///\returns A range containing all defined clock domains
domain_range clock_domains() const;
///\returns The name of a clock domain
std::string clock_domain_name(const DomainId id) const;
///\returns The source NodeId of the specified domain
NodeId clock_domain_source_node(const DomainId id) const;
///\returns Whether the specified domain id corresponds to a virtual clock
bool is_virtual_clock(const DomainId id) const;
///\returns The domain of the specified node id if it is a clock source
DomainId node_clock_domain(const NodeId id) const;
///\returns True if the node id is a clock source
bool node_is_clock_source(const NodeId id) const;
///\returns True if the node id is a constant generator
bool node_is_constant_generator(const NodeId id) const;
///\returns A valid DomainId if a clock domain with the specified name exists, DomainId::INVALID() otherwise
DomainId find_clock_domain(const std::string& name) const;
///Indicates whether the paths between src_domain and sink_domain should be analyzed
///\param src_domain The ID of the source (launch) clock domain
///\param sink_domain The ID of the sink (capture) clock domain
bool should_analyze(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node=NodeId::INVALID()) const;
///\returns The setup (max) constraint between src_domain and sink_domain at the specified capture_node_id
Time setup_constraint(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node=NodeId::INVALID()) const;
///\returns The hold (min) constraint between src_domain and sink_domain at the specified capture_node_id
Time hold_constraint(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node=NodeId::INVALID()) const;
///\returns The setup clock uncertainty between src_domain and sink_domain (defaults to zero if unspecified)
Time setup_clock_uncertainty(const DomainId src_domain, const DomainId sink_domain) const;
///\returns The hold clock uncertainty between src_domain and sink_domain (defaults to zero if unspecified)
Time hold_clock_uncertainty(const DomainId src_domain, const DomainId sink_domain) const;
///\returns The input delay constraint on node_id
Time input_constraint(const NodeId node_id, const DomainId domain_id, const DelayType delay_type) const;
///\returns The output delay constraint on node_id
Time output_constraint(const NodeId node_id, const DomainId domain_id, const DelayType delay_type) const;
///\returns The external (e.g. off-chip) source latency of a particular clock domain
//
//Corresponds to the delay from the clock's true source to its definition point on-chip
Time source_latency(const DomainId domain_id, ArrivalType arrival_type) const;
///\returns A range of all constant generator nodes
constant_generator_range constant_generators() const;
///\returns A range of all setup constraints
clock_constraint_range setup_constraints() const;
///\returns A range of all hold constraints
clock_constraint_range hold_constraints() const;
///\returns A range of all setup clock uncertainties
clock_uncertainty_range setup_clock_uncertainties() const;
///\returns A range of all hold clock uncertainties
clock_uncertainty_range hold_clock_uncertainties() const;
///\returns A range of all input constraints
io_constraint_range input_constraints(const DelayType delay_type) const;
///\returns A range of all output constraints
io_constraint_range output_constraints(const DelayType delay_type) const;
///\returns A range of input constraints for the node id
io_constraint_range input_constraints(const NodeId id, const DelayType delay_type) const;
///\returns A range of output constraints for the node id
io_constraint_range output_constraints(const NodeId id, const DelayType delay_type) const;
///\returns A range of all clock source latencies
source_latency_range source_latencies(ArrivalType arrival_type) const;
///Prints out the timing constraints for debug purposes
void print_constraints() const;
public: //Mutators
///\returns The DomainId of the clock with the specified name (will be created if it does not exist)
DomainId create_clock_domain(const std::string name);
///Sets the setup constraint between src_domain and sink_domain with value constraint
void set_setup_constraint(const DomainId src_domain, const DomainId sink_domain, const Time constraint);
void set_setup_constraint(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node, const Time constraint);
///Sets the hold constraint between src_domain and sink_domain with value constraint
void set_hold_constraint(const DomainId src_domain, const DomainId sink_domain, const Time constraint);
void set_hold_constraint(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node, const Time constraint);
///Sets the setup clock uncertainty between src_domain and sink_domain with value uncertainty
void set_setup_clock_uncertainty(const DomainId src_domain, const DomainId sink_domain, const Time uncertainty);
///Sets the hold clock uncertainty between src_domain and sink_domain with value uncertainty
void set_hold_clock_uncertainty(const DomainId src_domain, const DomainId sink_domain, const Time uncertainty);
///Sets the input delay constraint on node_id with value constraint
void set_input_constraint(const NodeId node_id, const DomainId domain_id, const DelayType delay_type, const Time constraint);
///Sets the output delay constraint on node_id with value constraint
void set_output_constraint(const NodeId node_id, const DomainId domain_id, const DelayType delay_type, const Time constraint);
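//
//For example (an illustrative sketch only; 'in_pad' and 'clk' are assumed
//NodeId/DomainId values):
//  set_input_constraint(in_pad, clk, DelayType::MAX, Time(2.)); //2ns worst-case external input delay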
///Sets the source latency of the specified clock domain
void set_source_latency(const DomainId domain_id, const ArrivalType arrival_type, const Time latency);
///Sets the source node for the specified clock domain
void set_clock_domain_source(const NodeId node_id, const DomainId domain_id);
///Sets whether the specified node is a constant generator
void set_constant_generator(const NodeId node_id, bool is_constant_generator=true);
///Update node IDs if they have changed
///\param node_map A vector mapping from old to new node ids
void remap_nodes(const tatum::util::linear_map<NodeId,NodeId>& node_map);
private:
typedef std::multimap<NodeId,IoConstraint>::iterator mutable_io_constraint_iterator;
private:
///\returns A valid domain id if the node is a clock source
DomainId find_node_source_clock_domain(const NodeId node_id) const;
io_constraint_iterator find_io_constraint(const NodeId node_id, const DomainId domain_id, const std::multimap<NodeId,IoConstraint>& io_constraints) const;
mutable_io_constraint_iterator find_io_constraint(const NodeId node_id, const DomainId domain_id, std::multimap<NodeId,IoConstraint>& io_constraints);
private: //Data
tatum::util::linear_map<DomainId,DomainId> domain_ids_;
tatum::util::linear_map<DomainId,std::string> domain_names_;
tatum::util::linear_map<DomainId,NodeId> domain_sources_;
std::unordered_set<NodeId> constant_generators_;
//The setup/hold constraints between clock domains and sink nodes
//If the key's capture_node is INVALID() it is treated as a wildcard (i.e. default)
//constraint
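//
//For example (illustrative values; 'clk' and 'ff_node' are assumed DomainId/NodeId values),
//a default constraint plus a node-specific override could be specified as:
//  set_setup_constraint(clk, clk, Time(10.));          //wildcard/default constraint
//  set_setup_constraint(clk, clk, ff_node, Time(8.));  //tighter constraint at ff_node only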
std::map<NodeDomainPair,Time> setup_constraints_;
std::map<NodeDomainPair,Time> hold_constraints_;
std::map<DomainPair,Time> setup_clock_uncertainties_;
std::map<DomainPair,Time> hold_clock_uncertainties_;
std::multimap<NodeId,IoConstraint> max_input_constraints_;
std::multimap<NodeId,IoConstraint> min_input_constraints_;
std::multimap<NodeId,IoConstraint> max_output_constraints_;
std::multimap<NodeId,IoConstraint> min_output_constraints_;
std::map<DomainId,Time> source_latencies_early_;
std::map<DomainId,Time> source_latencies_late_;
};
/*
* Utility classes
*/
struct DomainPair {
DomainPair(DomainId src, DomainId sink): src_domain_id(src), sink_domain_id(sink) {}
friend bool operator<(const DomainPair& lhs, const DomainPair& rhs) {
return std::tie(lhs.src_domain_id, lhs.sink_domain_id) < std::tie(rhs.src_domain_id, rhs.sink_domain_id);
}
DomainId src_domain_id;
DomainId sink_domain_id;
};
struct NodeDomainPair {
NodeDomainPair(DomainId src, DomainId sink, NodeId to_node)
: domain_pair(src, sink), capture_node(to_node) {}
friend bool operator<(const NodeDomainPair& lhs, const NodeDomainPair& rhs) {
return std::tie(lhs.capture_node, lhs.domain_pair) < std::tie(rhs.capture_node, rhs.domain_pair);
}
DomainPair domain_pair;
NodeId capture_node; //Should be treated as a wild-card if capture_node is NodeId::INVALID()
};
struct IoConstraint {
IoConstraint(DomainId domain_id, Time constraint_val): domain(domain_id), constraint(constraint_val) {}
DomainId domain;
Time constraint;
};
} //namespace

View File

@ -0,0 +1,11 @@
#pragma once
namespace tatum {
//Forward declarations
struct DomainPair;
struct NodeDomainPair;
struct IoConstraint;
class TimingConstraints;
} //namespace

View File

@ -0,0 +1,990 @@
#include <algorithm>
#include <iostream>
#include <sstream>
#include <map>
#include "tatum/util/tatum_assert.hpp"
#include "tatum/base/loop_detect.hpp"
#include "tatum/error.hpp"
#include "tatum/TimingGraph.hpp"
namespace tatum {
//Builds a mapping from old to new ids by skipping values marked invalid
template<typename Id>
tatum::util::linear_map<Id,Id> compress_ids(const tatum::util::linear_map<Id,Id>& ids) {
tatum::util::linear_map<Id,Id> id_map(ids.size());
size_t i = 0;
for(auto id : ids) {
if(id) {
//Valid
id_map.insert(id, Id(i));
++i;
}
}
return id_map;
}
//Returns a vector based on 'values', which has had entries dropped and
//re-ordered according to 'id_map'.
//
// Each entry in id_map corresponds to the associated element in 'values'.
// The value of the id_map entry is the new ID of the entry in values.
//
// If it is an invalid ID, the element in values is dropped.
// Otherwise the element is moved to the new ID location.
template<typename Id, typename T>
tatum::util::linear_map<Id,T> clean_and_reorder_values(const tatum::util::linear_map<Id,T>& values, const tatum::util::linear_map<Id,Id>& id_map) {
TATUM_ASSERT(values.size() == id_map.size());
//Allocate space for the values that will not be dropped
tatum::util::linear_map<Id,T> result;
//Move over the valid entries to their new locations
for(size_t cur_idx = 0; cur_idx < values.size(); ++cur_idx) {
Id old_id = Id(cur_idx);
Id new_id = id_map[old_id];
if (new_id) {
//There is a valid mapping
result.insert(new_id, std::move(values[old_id]));
}
}
return result;
}
//Returns the set of new valid Ids defined by 'id_map'
//TODO: merge with clean_and_reorder_values
template<typename Id>
tatum::util::linear_map<Id,Id> clean_and_reorder_ids(const tatum::util::linear_map<Id,Id>& id_map) {
//For IDs, the values are the new ids stored in the map
//Allocate a new vector to store the values that have not been dropped
tatum::util::linear_map<Id,Id> result;
//Move over the valid entries to their new locations
for(size_t cur_idx = 0; cur_idx < id_map.size(); ++cur_idx) {
Id old_id = Id(cur_idx);
Id new_id = id_map[old_id];
if (new_id) {
result.insert(new_id, new_id);
}
}
return result;
}
template<typename Container, typename ValId>
Container update_valid_refs(const Container& values, const tatum::util::linear_map<ValId,ValId>& id_map) {
Container updated;
for(ValId orig_val : values) {
if(orig_val) {
//Original item valid
ValId new_val = id_map[orig_val];
if(new_val) {
//The original item exists in the new mapping
updated.emplace_back(new_val);
}
}
}
return updated;
}
//Updates the Ids in 'values' based on id_map, even if the original or new mapping is not valid
template<typename Container, typename ValId>
Container update_all_refs(const Container& values, const tatum::util::linear_map<ValId,ValId>& id_map) {
Container updated;
for(ValId orig_val : values) {
//The original item was valid
ValId new_val = id_map[orig_val];
//The original item exists in the new mapping
updated.emplace_back(new_val);
}
return updated;
}
//Recursive helper functions for collecting transitively connected nodes
void find_transitive_fanout_nodes_recurr(const TimingGraph& tg,
std::vector<NodeId>& nodes,
const NodeId node,
size_t max_depth=std::numeric_limits<size_t>::max(),
size_t depth=0);
void find_transitive_fanin_nodes_recurr(const TimingGraph& tg,
std::vector<NodeId>& nodes,
const NodeId node,
size_t max_depth=std::numeric_limits<size_t>::max(),
size_t depth=0);
size_t TimingGraph::node_num_active_in_edges(const NodeId node) const {
size_t active_edges = 0;
for (EdgeId edge : node_in_edges(node)) {
if (!edge_disabled(edge)) {
++active_edges;
}
}
return active_edges;
}
EdgeId TimingGraph::node_clock_capture_edge(const NodeId node) const {
if(node_type(node) == NodeType::SINK) {
//Only sinks can have clock capture edges
//Look through the edges for the incoming clock edge
for(EdgeId edge : node_in_edges(node)) {
if(edge_type(edge) == EdgeType::PRIMITIVE_CLOCK_CAPTURE) {
return edge;
}
}
}
return EdgeId::INVALID();
}
EdgeId TimingGraph::node_clock_launch_edge(const NodeId node) const {
if(node_type(node) == NodeType::SOURCE) {
//Only sources can have clock launch edges
//Look through the edges for the incoming clock edge
for(EdgeId edge : node_in_edges(node)) {
if(edge_type(edge) == EdgeType::PRIMITIVE_CLOCK_LAUNCH) {
return edge;
}
}
}
return EdgeId::INVALID();
}
NodeId TimingGraph::add_node(const NodeType type) {
//Invalidate the levelization
is_levelized_ = false;
//Reserve an ID
NodeId node_id = NodeId(node_ids_.size());
node_ids_.push_back(node_id);
//Type
node_types_.push_back(type);
//Edges
node_out_edges_.push_back(std::vector<EdgeId>());
node_in_edges_.push_back(std::vector<EdgeId>());
//Verify sizes
TATUM_ASSERT(node_types_.size() == node_out_edges_.size());
TATUM_ASSERT(node_types_.size() == node_in_edges_.size());
//Return the ID of the added node
return node_id;
}
EdgeId TimingGraph::add_edge(const EdgeType type, const NodeId src_node, const NodeId sink_node) {
//We require that the source/sink nodes already be in the graph,
// so we can update them with their edge references
TATUM_ASSERT(valid_node_id(src_node));
TATUM_ASSERT(valid_node_id(sink_node));
//Invalidate the levelization
is_levelized_ = false;
//Reserve an edge ID
EdgeId edge_id = EdgeId(edge_ids_.size());
edge_ids_.push_back(edge_id);
//Create the edge
edge_types_.push_back(type);
edge_src_nodes_.push_back(src_node);
edge_sink_nodes_.push_back(sink_node);
edges_disabled_.push_back(false);
//Verify
TATUM_ASSERT(edge_sink_nodes_.size() == edge_src_nodes_.size());
//Update the nodes the edge references
node_out_edges_[src_node].push_back(edge_id);
node_in_edges_[sink_node].push_back(edge_id);
TATUM_ASSERT(edge_type(edge_id) == type);
TATUM_ASSERT(edge_src_node(edge_id) == src_node);
TATUM_ASSERT(edge_sink_node(edge_id) == sink_node);
//Return the edge id of the added edge
return edge_id;
}
void TimingGraph::remove_node(const NodeId node_id) {
TATUM_ASSERT(valid_node_id(node_id));
//Invalidate the levelization
is_levelized_ = false;
//Invalidate all the references
for(EdgeId in_edge : node_in_edges(node_id)) {
if(!in_edge) continue;
remove_edge(in_edge);
}
for(EdgeId out_edge : node_out_edges(node_id)) {
if(!out_edge) continue;
remove_edge(out_edge);
}
//Mark the node as invalid
node_ids_[node_id] = NodeId::INVALID();
}
void TimingGraph::remove_edge(const EdgeId edge_id) {
TATUM_ASSERT(valid_edge_id(edge_id));
//Invalidate the levelization
is_levelized_ = false;
//Invalidate the upstream node to edge references
NodeId src_node = edge_src_node(edge_id);
auto iter_out = std::find(node_out_edges_[src_node].begin(), node_out_edges_[src_node].end(), edge_id);
TATUM_ASSERT(iter_out != node_out_edges_[src_node].end());
*iter_out = EdgeId::INVALID();
//Invalidate the downstream node to edge references
NodeId sink_node = edge_sink_node(edge_id);
auto iter_in = std::find(node_in_edges_[sink_node].begin(), node_in_edges_[sink_node].end(), edge_id);
TATUM_ASSERT(iter_in != node_in_edges_[sink_node].end());
*iter_in = EdgeId::INVALID();
//Mark the edge invalid
edge_ids_[edge_id] = EdgeId::INVALID();
}
void TimingGraph::disable_edge(const EdgeId edge, bool disable) {
TATUM_ASSERT(valid_edge_id(edge));
if(edges_disabled_[edge] != disable) {
//If we are changing edges the levelization is no longer valid
is_levelized_ = false;
}
//Update the edge's disabled flag
edges_disabled_[edge] = disable;
}
EdgeType TimingGraph::edge_type(const EdgeId edge) const {
TATUM_ASSERT(valid_edge_id(edge));
return edge_types_[edge];
}
EdgeId TimingGraph::find_edge(const tatum::NodeId src_node, const tatum::NodeId sink_node) const {
TATUM_ASSERT(valid_node_id(src_node));
TATUM_ASSERT(valid_node_id(sink_node));
for(EdgeId edge : node_out_edges(src_node)) {
if(edge_sink_node(edge) == sink_node) {
return edge;
}
}
return EdgeId::INVALID();
}
GraphIdMaps TimingGraph::compress() {
auto node_id_map = compress_ids(node_ids_);
auto edge_id_map = compress_ids(edge_ids_);
remap_nodes(node_id_map);
remap_edges(edge_id_map);
levelize();
validate();
return {node_id_map, edge_id_map};
}
void TimingGraph::levelize() {
if(!is_levelized_) {
force_levelize();
}
}
void TimingGraph::force_levelize() {
//Levelizes the timing graph
//This over-writes any previous levelization if it exists.
//
//Also records primary outputs
//Clear any previous levelization
level_nodes_.clear();
level_ids_.clear();
primary_inputs_.clear();
logical_outputs_.clear();
//Allocate space for the first level
level_nodes_.resize(1);
//Copy the number of input edges per-node
//These will be decremented to know when all a node's upstream parents have been
//placed in a previous level (indicating that the node goes in the current level)
//
//Also initialize the first level (nodes with no fanin)
std::vector<int> node_fanin_remaining(nodes().size());
for(NodeId node_id : nodes()) {
size_t node_fanin = 0;
for(EdgeId edge : node_in_edges(node_id)) {
if(edge_disabled(edge)) continue;
++node_fanin;
}
node_fanin_remaining[size_t(node_id)] = node_fanin;
//Initialize the first level
if(node_fanin == 0) {
level_nodes_[LevelId(0)].push_back(node_id);
if (node_type(node_id) == NodeType::SOURCE) {
//We require all primary inputs (i.e. top-level circuit inputs) to
//be SOURCEs. Due to disconnected nodes we may have non-SOURCEs which
//appear in the first level.
primary_inputs_.push_back(node_id);
}
}
}
//Walk the graph from primary inputs (no fanin) to generate a topological sort
//
//We inspect the output edges of each node and decrement the fanin count of the
//target node. Once the fanin count for a node reaches zero it can be added
//to the current level.
int level_idx = 0;
level_ids_.emplace_back(level_idx);
bool inserted_node_in_level = true;
while(inserted_node_in_level) { //If nothing was inserted we are finished
inserted_node_in_level = false;
for(const NodeId node_id : level_nodes_[LevelId(level_idx)]) {
//Inspect the fanout
for(EdgeId edge_id : node_out_edges(node_id)) {
if(edge_disabled(edge_id)) continue;
NodeId sink_node = edge_sink_node(edge_id);
//Decrement the fanin count
TATUM_ASSERT(node_fanin_remaining[size_t(sink_node)] > 0);
node_fanin_remaining[size_t(sink_node)]--;
//Add to the next level if all fanin has been seen
if(node_fanin_remaining[size_t(sink_node)] == 0) {
//Ensure there is space by allocating the next level if required
level_nodes_.resize(level_idx+2);
//Add the node
level_nodes_[LevelId(level_idx+1)].push_back(sink_node);
inserted_node_in_level = true;
}
}
//Also track the primary outputs (those with fan-in AND no fan-out)
//
// There may be some nodes with neither fan-in nor fan-out.
// We treat them as primary inputs, so they should not be added to
// the primary outputs
if( node_out_edges(node_id).size() == 0
&& node_in_edges(node_id).size() != 0
&& node_type(node_id) == NodeType::SINK) {
logical_outputs_.push_back(node_id);
}
}
if(inserted_node_in_level) {
level_idx++;
level_ids_.emplace_back(level_idx);
}
}
//Mark the levelization as valid
is_levelized_ = true;
}
bool TimingGraph::validate() const {
bool valid = true;
valid &= validate_sizes();
valid &= validate_values();
valid &= validate_structure();
return valid;
}
GraphIdMaps TimingGraph::optimize_layout() {
auto node_id_map = optimize_node_layout();
remap_nodes(node_id_map);
levelize();
auto edge_id_map = optimize_edge_layout();
remap_edges(edge_id_map);
levelize();
return {node_id_map, edge_id_map};
}
tatum::util::linear_map<EdgeId,EdgeId> TimingGraph::optimize_edge_layout() const {
//Make all edges in a level be contiguous in memory
//Determine the edges driven by each level of the graph
std::vector<std::vector<EdgeId>> edge_levels;
for(LevelId level_id : levels()) {
edge_levels.push_back(std::vector<EdgeId>());
for(auto node_id : level_nodes(level_id)) {
//We walk the nodes according to the input-edge order.
//This is the same order used by the arrival-time traversal (which is responsible
//for most of the analyzer run-time), so matching its order exactly results in
//better cache locality
for(EdgeId edge_id : node_in_edges(node_id)) {
//edge_id is driven by nodes in level level_idx
edge_levels[size_t(level_id)].push_back(edge_id);
}
}
}
//Maps from original to new edge id, used to update node to edge refs
tatum::util::linear_map<EdgeId,EdgeId> orig_to_new_edge_id(edges().size());
//Determine the new order
size_t iedge = 0;
for(auto& edge_level : edge_levels) {
for(const EdgeId orig_edge_id : edge_level) {
//Save the new edge id to update nodes
orig_to_new_edge_id[orig_edge_id] = EdgeId(iedge);
++iedge;
}
}
for(auto new_id : orig_to_new_edge_id) {
TATUM_ASSERT(new_id);
}
TATUM_ASSERT(iedge == edges().size());
return orig_to_new_edge_id;
}
tatum::util::linear_map<NodeId,NodeId> TimingGraph::optimize_node_layout() const {
//Make all nodes in a level be contiguous in memory
/*
* Keep a map of the old and new node ids to update edges
* and node levels later
*/
tatum::util::linear_map<NodeId,NodeId> orig_to_new_node_id(nodes().size());
//Determine the new order
size_t inode = 0;
for(const LevelId level_id : levels()) {
for(const NodeId old_node_id : level_nodes(level_id)) {
//Record the new node id
orig_to_new_node_id[old_node_id] = NodeId(inode);
++inode;
}
}
for(auto new_id : orig_to_new_node_id) {
TATUM_ASSERT(new_id);
}
TATUM_ASSERT(inode == nodes().size());
return orig_to_new_node_id;
}
void TimingGraph::remap_nodes(const tatum::util::linear_map<NodeId,NodeId>& node_id_map) {
is_levelized_ = false;
//Update values
node_ids_ = clean_and_reorder_ids(node_id_map);
node_types_ = clean_and_reorder_values(node_types_, node_id_map);
node_in_edges_ = clean_and_reorder_values(node_in_edges_, node_id_map);
node_out_edges_ = clean_and_reorder_values(node_out_edges_, node_id_map);
//Update references
edge_src_nodes_ = update_all_refs(edge_src_nodes_, node_id_map);
edge_sink_nodes_ = update_all_refs(edge_sink_nodes_, node_id_map);
}
void TimingGraph::remap_edges(const tatum::util::linear_map<EdgeId,EdgeId>& edge_id_map) {
is_levelized_ = false;
//Update values
edge_ids_ = clean_and_reorder_ids(edge_id_map);
edge_types_ = clean_and_reorder_values(edge_types_, edge_id_map);
edge_sink_nodes_ = clean_and_reorder_values(edge_sink_nodes_, edge_id_map);
edge_src_nodes_ = clean_and_reorder_values(edge_src_nodes_, edge_id_map);
edges_disabled_ = clean_and_reorder_values(edges_disabled_, edge_id_map);
//Update cross-references
for(auto& edges_ref : node_in_edges_) {
edges_ref = update_valid_refs(edges_ref, edge_id_map);
}
for(auto& edges_ref : node_out_edges_) {
edges_ref = update_valid_refs(edges_ref, edge_id_map);
}
}
bool TimingGraph::valid_node_id(const NodeId node_id) const {
return size_t(node_id) < node_ids_.size();
}
bool TimingGraph::valid_edge_id(const EdgeId edge_id) const {
return size_t(edge_id) < edge_ids_.size();
}
bool TimingGraph::valid_level_id(const LevelId level_id) const {
return size_t(level_id) < level_ids_.size();
}
bool TimingGraph::validate_sizes() const {
if ( node_ids_.size() != node_types_.size()
|| node_ids_.size() != node_in_edges_.size()
|| node_ids_.size() != node_out_edges_.size()) {
throw tatum::Error("Inconsistent node attribute sizes");
}
if ( edge_ids_.size() != edge_types_.size()
|| edge_ids_.size() != edge_sink_nodes_.size()
|| edge_ids_.size() != edge_src_nodes_.size()
|| edge_ids_.size() != edges_disabled_.size()) {
throw tatum::Error("Inconsistent edge attribute sizes");
}
if (level_ids_.size() != level_nodes_.size()) {
throw tatum::Error("Inconsistent level attribute sizes");
}
return true;
}
bool TimingGraph::validate_values() const {
for(NodeId node_id : nodes()) {
if(!valid_node_id(node_id)) {
throw tatum::Error("Invalid node id", node_id);
}
for(EdgeId edge_id : node_in_edges_[node_id]) {
if(!valid_edge_id(edge_id)) {
throw tatum::Error("Invalid node-in-edge reference", node_id, edge_id);
}
//Check that the references are consistent
if(edge_sink_nodes_[edge_id] != node_id) {
throw tatum::Error("Mismatched edge-sink/node-in-edge reference", node_id, edge_id);
}
}
for(EdgeId edge_id : node_out_edges_[node_id]) {
if(!valid_edge_id(edge_id)) {
throw tatum::Error("Invalid node-out-edge reference", node_id, edge_id);
}
//Check that the references are consistent
if(edge_src_nodes_[edge_id] != node_id) {
throw tatum::Error("Mismatched edge-src/node-out-edge reference", node_id, edge_id);
}
}
}
for(EdgeId edge_id : edges()) {
if(!valid_edge_id(edge_id)) {
throw tatum::Error("Invalid edge id", edge_id);
}
NodeId src_node = edge_src_nodes_[edge_id];
if(!valid_node_id(src_node)) {
throw tatum::Error("Invalid edge source node", src_node, edge_id);
}
NodeId sink_node = edge_sink_nodes_[edge_id];
if(!valid_node_id(sink_node)) {
throw tatum::Error("Invalid edge sink node", sink_node, edge_id);
}
}
//TODO: more checking
return true;
}
bool TimingGraph::validate_structure() const {
//Verify that the timing graph connectivity is as expected
if (!is_levelized_) {
throw tatum::Error("Timing graph must be levelized for structural validation");
}
for(NodeId src_node : nodes()) {
NodeType src_type = node_type(src_node);
auto out_edges = node_out_edges(src_node);
auto in_edges = node_in_edges(src_node);
//Check expected number of fan-in/fan-out edges
if(src_type == NodeType::SOURCE) {
if(in_edges.size() > 1) {
throw tatum::Error("SOURCE node has more than one active incoming edge (expected 0 if primary input, or 1 if clocked)", src_node);
}
} else if (src_type == NodeType::SINK) {
if(out_edges.size() > 0) {
throw tatum::Error("SINK node has out-going edges", src_node);
}
} else if (src_type == NodeType::IPIN) {
if(in_edges.size() == 0 && !allow_dangling_combinational_nodes_) {
throw tatum::Error("IPIN has no in-coming edges", src_node);
}
if(out_edges.size() == 0 && !allow_dangling_combinational_nodes_) {
throw tatum::Error("IPIN has no out-going edges", src_node);
}
} else if (src_type == NodeType::OPIN) {
//May have no incoming edges if a constant generator, so don't check that case
if(out_edges.size() == 0 && !allow_dangling_combinational_nodes_) {
throw tatum::Error("OPIN has no out-going edges", src_node);
}
} else {
TATUM_ASSERT(src_type == NodeType::CPIN);
if(in_edges.size() == 0) {
throw tatum::Error("CPIN has no in-coming edges", src_node);
}
//We do not check for out-going cpin edges, since there is no reason that
//a clock pin must be used
}
//Check node-type edge connectivity
for(EdgeId out_edge : node_out_edges(src_node)) {
NodeId sink_node = edge_sink_node(out_edge);
NodeType sink_type = node_type(sink_node);
EdgeType out_edge_type = edge_type(out_edge);
//Check type connectivity
if (src_type == NodeType::SOURCE) {
if( sink_type != NodeType::IPIN
&& sink_type != NodeType::OPIN
&& sink_type != NodeType::CPIN
&& sink_type != NodeType::SINK) {
throw tatum::Error("SOURCE nodes should only drive IPIN, OPIN, CPIN or SINK nodes", src_node, out_edge);
}
if(sink_type == NodeType::SINK) {
if( out_edge_type != EdgeType::INTERCONNECT
&& out_edge_type != EdgeType::PRIMITIVE_COMBINATIONAL) {
throw tatum::Error("SOURCE to SINK edges should always be either INTERCONNECT or PRIMTIIVE_COMBINATIONAL type edges", src_node, out_edge);
}
} else if (sink_type == NodeType::OPIN) {
if(out_edge_type != EdgeType::PRIMITIVE_COMBINATIONAL) {
throw tatum::Error("SOURCE to OPIN edges should always be PRIMITIVE_COMBINATIONAL type edges", src_node, out_edge);
}
} else {
TATUM_ASSERT(sink_type == NodeType::IPIN || sink_type == NodeType::CPIN);
if(out_edge_type != EdgeType::INTERCONNECT) {
throw tatum::Error("SOURCE to IPIN/CPIN edges should always be INTERCONNECT type edges", src_node, out_edge);
}
}
} else if (src_type == NodeType::SINK) {
throw tatum::Error("SINK nodes should not have out-going edges", sink_node);
} else if (src_type == NodeType::IPIN) {
if(sink_type != NodeType::OPIN && sink_type != NodeType::SINK) {
throw tatum::Error("IPIN nodes should only drive OPIN or SINK nodes", src_node, out_edge);
}
if(out_edge_type != EdgeType::PRIMITIVE_COMBINATIONAL) {
throw tatum::Error("IPIN to OPIN/SINK edges should always be PRIMITIVE_COMBINATIONAL type edges", src_node, out_edge);
}
} else if (src_type == NodeType::OPIN) {
if( sink_type != NodeType::IPIN
&& sink_type != NodeType::CPIN
&& sink_type != NodeType::SINK) {
throw tatum::Error("OPIN nodes should only drive IPIN, CPIN or SINK nodes", src_node, out_edge);
}
if(out_edge_type != EdgeType::INTERCONNECT) {
throw tatum::Error("OPIN out edges should always be INTERCONNECT type edges", src_node, out_edge);
}
} else if (src_type == NodeType::CPIN) {
if( sink_type != NodeType::SOURCE
&& sink_type != NodeType::SINK
&& sink_type != NodeType::OPIN) {
throw tatum::Error("CPIN nodes should only drive SOURCE, OPIN or SINK nodes", src_node, out_edge);
}
if(sink_type == NodeType::SOURCE && out_edge_type != EdgeType::PRIMITIVE_CLOCK_LAUNCH) {
throw tatum::Error("CPIN to SOURCE edges should always be PRIMITIVE_CLOCK_LAUNCH type edges", src_node, out_edge);
} else if (sink_type == NodeType::SINK && out_edge_type != EdgeType::PRIMITIVE_CLOCK_CAPTURE) {
throw tatum::Error("CPIN to SINK edges should always be PRIMITIVE_CLOCK_CAPTURE type edges", src_node, out_edge);
}
} else {
throw tatum::Error("Unrecognized node type", src_node, out_edge);
}
}
}
//Record the nodes associated with each edge
std::map<std::pair<NodeId,NodeId>,std::vector<EdgeId>> edge_nodes;
for(EdgeId edge : edges()) {
NodeId src_node = edge_src_node(edge);
NodeId sink_node = edge_sink_node(edge);
edge_nodes[{src_node,sink_node}].push_back(edge);
}
//Check for duplicate edges between pairs of nodes
for(const auto& kv : edge_nodes) {
const auto& edge_ids = kv.second;
TATUM_ASSERT_MSG(edge_ids.size() > 0, "Node pair must have at least one edge");
if(edge_ids.size() > 1) {
NodeId src_node = kv.first.first;
NodeId sink_node = kv.first.second;
std::stringstream ss;
ss << "Dulplicate timing edges found " << src_node << " -> " << sink_node
<< ", duplicate edges: ";
for(EdgeId edge : edge_ids) {
ss << edge << " ";
}
throw tatum::Error(ss.str(), src_node);
}
}
for(NodeId node : primary_inputs()) {
if(!node_in_edges(node).empty()) {
throw tatum::Error("Primary input nodes should have no incoming edges", node);
}
if(node_type(node) != NodeType::SOURCE) {
throw tatum::Error("Primary inputs should be only SOURCE nodes", node);
}
}
for(NodeId node : logical_outputs()) {
if(!node_out_edges(node).empty()) {
throw tatum::Error("Logical output node should have no outgoing edges", node);
}
if(node_type(node) != NodeType::SINK) {
throw tatum::Error("Logical outputs should be only SINK nodes", node);
}
}
auto sccs = identify_combinational_loops(*this);
if(!sccs.empty()) {
throw Error("Timing graph contains active combinational loops. "
"Either disable timing edges (to break the loops) or restructure the graph.");
//Future work:
//
// We could handle this internally by identifying the incoming and outgoing edges of the SCC,
// and estimating a 'max' delay through the SCC from each incoming to each outgoing edge.
// The SCC could then be replaced with a clique between SCC input and output edges.
//
// One possible estimate is to trace the longest path through the SCC without visiting a node
// more than once (although this is not guaranteed to be conservative).
}
return true;
}
size_t TimingGraph::count_active_edges(edge_range edges_to_check) const {
size_t active_cnt = 0;
for(EdgeId edge : edges_to_check) {
if(!edge_disabled(edge)) {
++active_cnt;
}
}
return active_cnt;
}
//Returns sets of nodes involved in combinational loops
std::vector<std::vector<NodeId>> identify_combinational_loops(const TimingGraph& tg) {
constexpr size_t MIN_LOOP_SCC_SIZE = 2; //Any SCC of size >= 2 is a loop in the timing graph
return identify_strongly_connected_components(tg, MIN_LOOP_SCC_SIZE);
}
std::vector<NodeId> find_transitively_connected_nodes(const TimingGraph& tg,
const std::vector<NodeId> through_nodes,
size_t max_depth) {
std::vector<NodeId> nodes;
for(NodeId through_node : through_nodes) {
find_transitive_fanin_nodes_recurr(tg, nodes, through_node, max_depth);
find_transitive_fanout_nodes_recurr(tg, nodes, through_node, max_depth);
}
std::sort(nodes.begin(), nodes.end());
nodes.erase(std::unique(nodes.begin(), nodes.end()), nodes.end());
return nodes;
}
std::vector<NodeId> find_transitive_fanin_nodes(const TimingGraph& tg,
const std::vector<NodeId> sinks,
size_t max_depth) {
std::vector<NodeId> nodes;
for(NodeId sink : sinks) {
find_transitive_fanin_nodes_recurr(tg, nodes, sink, max_depth);
}
std::sort(nodes.begin(), nodes.end());
nodes.erase(std::unique(nodes.begin(), nodes.end()), nodes.end());
return nodes;
}
std::vector<NodeId> find_transitive_fanout_nodes(const TimingGraph& tg,
const std::vector<NodeId> sources,
size_t max_depth) {
std::vector<NodeId> nodes;
for(NodeId source : sources) {
find_transitive_fanout_nodes_recurr(tg, nodes, source, max_depth);
}
std::sort(nodes.begin(), nodes.end());
nodes.erase(std::unique(nodes.begin(), nodes.end()), nodes.end());
return nodes;
}
void find_transitive_fanin_nodes_recurr(const TimingGraph& tg,
std::vector<NodeId>& nodes,
const NodeId node,
size_t max_depth,
size_t depth) {
if(depth > max_depth) return;
nodes.push_back(node);
for(EdgeId in_edge : tg.node_in_edges(node)) {
if(tg.edge_disabled(in_edge)) continue;
NodeId src_node = tg.edge_src_node(in_edge);
find_transitive_fanin_nodes_recurr(tg, nodes, src_node, max_depth, depth + 1);
}
}
void find_transitive_fanout_nodes_recurr(const TimingGraph& tg,
std::vector<NodeId>& nodes,
const NodeId node,
size_t max_depth,
size_t depth) {
if(depth > max_depth) return;
nodes.push_back(node);
for(EdgeId out_edge : tg.node_out_edges(node)) {
if(tg.edge_disabled(out_edge)) continue;
NodeId sink_node = tg.edge_sink_node(out_edge);
find_transitive_fanout_nodes_recurr(tg, nodes, sink_node, max_depth, depth+1);
}
}
EdgeType infer_edge_type(const TimingGraph& tg, EdgeId edge) {
NodeId src_node = tg.edge_src_node(edge);
NodeId sink_node = tg.edge_sink_node(edge);
NodeType src_node_type = tg.node_type(src_node);
NodeType sink_node_type = tg.node_type(sink_node);
if(src_node_type == NodeType::IPIN && sink_node_type == NodeType::OPIN) {
return EdgeType::PRIMITIVE_COMBINATIONAL;
} else if ( (src_node_type == NodeType::OPIN || src_node_type == NodeType::SOURCE)
&& (sink_node_type == NodeType::IPIN || sink_node_type == NodeType::SINK || sink_node_type == NodeType::CPIN)) {
return EdgeType::INTERCONNECT;
} else if (src_node_type == NodeType::CPIN) {
if (sink_node_type == NodeType::SOURCE) {
return EdgeType::PRIMITIVE_CLOCK_LAUNCH;
} else if(sink_node_type == NodeType::SINK) {
return EdgeType::PRIMITIVE_CLOCK_CAPTURE;
} else {
throw tatum::Error("Invalid edge sink node (CPIN source node must connect to SOURCE or SINK sink node)", sink_node, edge);
}
} else {
throw tatum::Error("Invalid edge sink/source nodes", edge);
}
}
//Stream output for NodeType
std::ostream& operator<<(std::ostream& os, const NodeType type) {
if (type == NodeType::SOURCE) os << "SOURCE";
else if (type == NodeType::SINK) os << "SINK";
else if (type == NodeType::IPIN) os << "IPIN";
else if (type == NodeType::OPIN) os << "OPIN";
else if (type == NodeType::CPIN) os << "CPIN";
else throw std::domain_error("Unrecognized NodeType");
return os;
}
//Stream output for EdgeType
std::ostream& operator<<(std::ostream& os, const EdgeType type) {
if (type == EdgeType::PRIMITIVE_COMBINATIONAL) os << "PRIMITIVE_COMBINATIONAL";
else if (type == EdgeType::PRIMITIVE_CLOCK_LAUNCH) os << "PRIMITIVE_CLOCK_LAUNCH";
else if (type == EdgeType::PRIMITIVE_CLOCK_CAPTURE) os << "PRIMITIVE_CLOCK_CAPTURE";
else if (type == EdgeType::INTERCONNECT) os << "INTERCONNECT";
else throw std::domain_error("Unrecognized EdgeType");
return os;
}
std::ostream& operator<<(std::ostream& os, NodeId node_id) {
if(node_id == NodeId::INVALID()) {
return os << "Node(INVALID)";
} else {
return os << "Node(" << size_t(node_id) << ")";
}
}
std::ostream& operator<<(std::ostream& os, EdgeId edge_id) {
if(edge_id == EdgeId::INVALID()) {
return os << "Edge(INVALID)";
} else {
return os << "Edge(" << size_t(edge_id) << ")";
}
}
std::ostream& operator<<(std::ostream& os, DomainId domain_id) {
if(domain_id == DomainId::INVALID()) {
return os << "Domain(INVALID)";
} else {
return os << "Domain(" << size_t(domain_id) << ")";
}
}
std::ostream& operator<<(std::ostream& os, LevelId level_id) {
if(level_id == LevelId::INVALID()) {
return os << "Level(INVALID)";
} else {
return os << "Level(" << size_t(level_id) << ")";
}
}
} //namespace

View File

@ -0,0 +1,335 @@
#pragma once
/**
* The 'TimingGraph' class represents a timing graph.
*
* Logically the timing graph is a directed graph connecting Primary Inputs (nodes with no
 * fan-in, e.g. circuit inputs, Flip-Flop Q pins) to Primary Outputs (nodes with no fan-out,
* e.g. circuit outputs, Flip-Flop D pins), connecting through intermediate nodes (nodes with
* both fan-in and fan-out, e.g. combinational logic).
*
* To make performing the forward/backward traversals through the timing graph easier, we actually
* store all edges as bi-directional edges.
*
* NOTE: We store only the static connectivity and node information in the 'TimingGraph' class.
 * Other dynamic information (edge delays, node arrival/required times) is stored separately.
 * This means that most actions operating on the timing graph (e.g. TimingAnalyzers) only
* require read-only access to the timing graph.
*
* Accessing Graph Data
* ======================
* For performance reasons (see Implementation section for details) we store all graph data
* in the 'TimingGraph' class, and do not use separate edge/node objects. To facilitate this,
* each node and edge in the graph is given a unique identifier (e.g. NodeId, EdgeId). These
* ID's can then be used to access the required data through the appropriate member function.
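 *
 * For example, a read-only walk over the graph could look like the following
 * (a minimal sketch; 'tg' is an assumed, already-built TimingGraph):
 *
 *      for(NodeId node : tg.nodes()) {
 *          NodeType type = tg.node_type(node);
 *          for(EdgeId edge : tg.node_out_edges(node)) {
 *              NodeId sink = tg.edge_sink_node(edge);
 *              //...use 'type', 'sink', etc. to process the edge from 'node' to 'sink'
 *          }
 *      }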
*
* Implementation
* ================
* The 'TimingGraph' class represents the timing graph in a "Struct of Arrays (SoA)" manner,
* rather than the more typical "Array of Structs (AoS)" data layout.
*
* By using a SoA layout we keep all data for a particular field (e.g. node types) in contiguous
 * memory. Using an AoS layout the various fields across nodes would *not* be contiguous
 * (although the different fields within each object (e.g. a TimingNode class) would be contiguous).
 * Since we typically perform operations on particular fields across nodes the SoA layout performs
 * better (and enables memory ordering optimizations). The edges are also stored in a SoA format.
*
* The SoA layout also motivates the ID based approach, which allows direct indexing into the required
* vector to retrieve data.
*
* Memory Ordering Optimizations
* ===============================
* SoA also allows several additional memory layout optimizations. In particular, we know the
* order that a (serial) timing analyzer will walk the timing graph (i.e. level-by-level, from the
* start to end node in each level).
*
* Using this information we can re-arrange the node and edge data to match this traversal order.
* This greatly improves caching behaviour, since pulling in data for one node immediately pulls
* in data for the next node/edge to be processed. This exploits both spatial and temporal locality,
* and ensures that each cache line pulled into the cache will (likely) be accessed multiple times
* before being evicted.
*
 * Note that performing these optimizations is currently done explicitly by calling the optimize_edge_layout()
 * and optimize_node_layout() member functions. In the future (particularly if incremental modification
 * support is added), it may be a good idea to apply these modifications automatically as needed.
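 *
 * A typical build sequence might therefore be (an illustrative sketch, not a
 * prescribed recipe; the node and edge types are arbitrary examples):
 *
 *      TimingGraph tg;
 *      NodeId src = tg.add_node(NodeType::SOURCE);
 *      NodeId snk = tg.add_node(NodeType::SINK);
 *      tg.add_edge(EdgeType::INTERCONNECT, src, snk);
 *      tg.levelize();          //Topologically order the nodes into levels
 *      tg.optimize_layout();   //Re-order node/edge IDs for cache locality (invalidates old IDs)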
*
*/
#include <vector>
#include <set>
#include <limits>
#include "tatum/util/tatum_range.hpp"
#include "tatum/util/tatum_linear_map.hpp"
#include "tatum/TimingGraphFwd.hpp"
namespace tatum {
class TimingGraph {
public: //Public types
//Iterators
typedef tatum::util::linear_map<EdgeId,EdgeId>::const_iterator edge_iterator;
typedef tatum::util::linear_map<NodeId,NodeId>::const_iterator node_iterator;
typedef tatum::util::linear_map<LevelId,LevelId>::const_iterator level_iterator;
typedef tatum::util::linear_map<LevelId,LevelId>::const_reverse_iterator reverse_level_iterator;
//Ranges
typedef tatum::util::Range<node_iterator> node_range;
typedef tatum::util::Range<edge_iterator> edge_range;
typedef tatum::util::Range<level_iterator> level_range;
typedef tatum::util::Range<reverse_level_iterator> reverse_level_range;
public: //Public accessors
/*
* Node data accessors
*/
///\param id The id of a node
///\returns The type of the node
NodeType node_type(const NodeId id) const { return node_types_[id]; }
///\param id The node id
///\returns A range of all out-going edges the node drives
edge_range node_out_edges(const NodeId id) const { return tatum::util::make_range(node_out_edges_[id].begin(), node_out_edges_[id].end()); }
///\param id The node id
///\returns A range of all in-coming edges terminating at the node
edge_range node_in_edges(const NodeId id) const { return tatum::util::make_range(node_in_edges_[id].begin(), node_in_edges_[id].end()); }
///\param id The Node id
///\returns The number of active (undisabled) edges terminating at the node
size_t node_num_active_in_edges(const NodeId id) const;
///\param id The node id
///\returns The edge id corresponding to the incoming clock capture edge, or EdgeId::INVALID() if none
EdgeId node_clock_capture_edge(const NodeId id) const;
///\param id The node id
///\returns The edge id corresponding to the incoming clock launch edge, or EdgeId::INVALID() if none
EdgeId node_clock_launch_edge(const NodeId id) const;
/*
* Edge accessors
*/
///\param id The id of an edge
///\returns The type of the edge
EdgeType edge_type(const EdgeId id) const;
///\param id The id of an edge
///\returns The node id of the edge's sink
NodeId edge_sink_node(const EdgeId id) const { return edge_sink_nodes_[id]; }
///\param id The id of an edge
///\returns The node id of the edge's source (driver)
NodeId edge_src_node(const EdgeId id) const {
return edge_src_nodes_[id];
}
///\param id The id of an edge
///\returns Whether the edge is disabled (i.e. ignored during timing analysis)
bool edge_disabled(const EdgeId id) const { return edges_disabled_[id]; }
///\param src_node the edge's source node
///\param sink_node the edge's sink node
///\returns The edge between the source and sink nodes, or EdgeId::INVALID() if none exists
EdgeId find_edge(const tatum::NodeId src_node, const tatum::NodeId sink_node) const;
/*
* Level accessors
*/
///\param level_id The level index in the graph
///\pre The graph must be levelized.
///\returns A range containing the nodes in the level
///\see levelize()
node_range level_nodes(const LevelId level_id) const {
TATUM_ASSERT_MSG(is_levelized_, "Timing graph must be levelized");
return tatum::util::make_range(level_nodes_[level_id].begin(),
level_nodes_[level_id].end());
}
///\pre The graph must be levelized.
///\returns A range containing the nodes which are primary inputs (i.e. SOURCE's with no fanin, corresponding to top level design input pins)
///\warning Not all SOURCE nodes in the graph are primary inputs (e.g. FF Q pins are SOURCE's but have incoming edges from the clock network)
///\see levelize()
node_range primary_inputs() const {
TATUM_ASSERT_MSG(is_levelized_, "Timing graph must be levelized");
return tatum::util::make_range(primary_inputs_.begin(), primary_inputs_.end());
}
///\pre The graph must be levelized.
///\returns A range containing the nodes which are logical outputs (i.e. nodes with no fan-out,
/// corresponding to top level design output pins and FF D pins)
///\warning The logical outputs may be on different levels of the graph
///\see levelize()
node_range logical_outputs() const {
TATUM_ASSERT_MSG(is_levelized_, "Timing graph must be levelized");
return tatum::util::make_range(logical_outputs_.begin(), logical_outputs_.end());
}
/*
* Graph aggregate accessors
*/
//\returns A range containing all nodes in the graph
node_range nodes() const { return tatum::util::make_range(node_ids_.begin(), node_ids_.end()); }
//\returns A range containing all edges in the graph
edge_range edges() const { return tatum::util::make_range(edge_ids_.begin(), edge_ids_.end()); }
//\returns A range containing all levels in the graph
level_range levels() const {
TATUM_ASSERT_MSG(is_levelized_, "Timing graph must be levelized");
return tatum::util::make_range(level_ids_.begin(), level_ids_.end());
}
//\returns A range containing all levels in the graph in *reverse* order
reverse_level_range reversed_levels() const {
TATUM_ASSERT_MSG(is_levelized_, "Timing graph must be levelized");
return tatum::util::make_range(level_ids_.rbegin(), level_ids_.rend());
}
//\returns true if the timing graph is internally consistent, throws an exception if not
bool validate() const;
public: //Mutators
/*
* Graph modifiers
*/
///Adds a node to the timing graph
///\param type The type of the node to be added
///\warning Graph will likely need to be re-levelized after modification
NodeId add_node(const NodeType type);
///Adds an edge to the timing graph
///\param type The edge's type
///\param src_node The node id of the edge's driving node
///\param sink_node The node id of the edge's sink node
///\pre The src_node and sink_node must have been already added to the graph
///\warning Graph will likely need to be re-levelized after modification
EdgeId add_edge(const EdgeType type, const NodeId src_node, const NodeId sink_node);
///Removes a node (and its associated edges) from the timing graph
///\param node_id The node to remove
///\warning This will leave invalid ID references in the timing graph until compress() is called
///\see add_node(), compress()
void remove_node(const NodeId node_id);
///Removes an edge from the timing graph
///\param edge_id The edge to remove
///\warning This will leave invalid ID references in the timing graph until compress() is called
///\see add_edge(), compress()
void remove_edge(const EdgeId edge_id);
///Disables an edge in the timing graph (e.g. to break a combinational loop)
///\param edge_id The edge to disable
///\see identify_combinational_loops()
void disable_edge(const EdgeId edge_id, bool disable=true);
///Compresses the Edge and Node ID spaces to eliminate invalid entries
///\returns A structure containing mappings from old to new IDs
GraphIdMaps compress();
/*
* Graph-level modification operations
*/
///Levelizes the graph.
///\post The graph is topologically ordered (i.e. the level of each node is known)
///\post The primary inputs and logical outputs have been identified
void levelize();
/*
* Memory layout optimization operations
*/
///Optimizes the graph's internal memory layout for better performance
///\warning Old IDs will be invalidated
///\returns The mapping from old to new IDs
GraphIdMaps optimize_layout();
///Sets whether dangling combinational nodes are allowed (true) or treated as an error (false)
void set_allow_dangling_combinational_nodes(bool value) {
allow_dangling_combinational_nodes_ = value;
}
private: //Internal helper functions
///\returns A mapping from old to new edge ids which is optimized for performance
/// (i.e. cache locality)
tatum::util::linear_map<EdgeId,EdgeId> optimize_edge_layout() const;
///\returns A mapping from old to new node ids which is optimized for performance
/// (i.e. cache locality)
tatum::util::linear_map<NodeId,NodeId> optimize_node_layout() const;
void remap_nodes(const tatum::util::linear_map<NodeId,NodeId>& node_id_map);
void remap_edges(const tatum::util::linear_map<EdgeId,EdgeId>& edge_id_map);
void force_levelize();
bool valid_node_id(const NodeId node_id) const;
bool valid_edge_id(const EdgeId edge_id) const;
bool valid_level_id(const LevelId level_id) const;
bool validate_sizes() const;
bool validate_values() const;
bool validate_structure() const;
size_t count_active_edges(edge_range) const;
private: //Data
/*
* For improved memory locality, we use a Struct of Arrays (SoA)
* data layout, rather than Array of Structs (AoS)
*/
//Node data
tatum::util::linear_map<NodeId,NodeId> node_ids_; //The node IDs in the graph
tatum::util::linear_map<NodeId,NodeType> node_types_; //Type of node
tatum::util::linear_map<NodeId,std::vector<EdgeId>> node_in_edges_; //Incoming edge IDs for each node
tatum::util::linear_map<NodeId,std::vector<EdgeId>> node_out_edges_; //Outgoing edge IDs for each node
//Edge data
tatum::util::linear_map<EdgeId,EdgeId> edge_ids_; //The edge IDs in the graph
tatum::util::linear_map<EdgeId,EdgeType> edge_types_; //Type of edge
tatum::util::linear_map<EdgeId,NodeId> edge_sink_nodes_; //Sink node for each edge
tatum::util::linear_map<EdgeId,NodeId> edge_src_nodes_; //Source node for each edge
tatum::util::linear_map<EdgeId,bool> edges_disabled_; //Whether each edge is disabled (ignored during analysis)
//Auxiliary graph-level info, filled in by levelize()
tatum::util::linear_map<LevelId,LevelId> level_ids_; //The level IDs in the graph
tatum::util::linear_map<LevelId,std::vector<NodeId>> level_nodes_; //Nodes in each level
std::vector<NodeId> primary_inputs_; //Primary input nodes of the timing graph.
std::vector<NodeId> logical_outputs_; //Logical output nodes of the timing graph.
bool is_levelized_ = false; //Indicates if the current levelization is valid
bool allow_dangling_combinational_nodes_ = false;
};
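/*
 * Example usage sketch (illustrative only; the node/edge topology below is an
 * assumption and does not correspond to any particular netlist):
 *
 *      TimingGraph tg;
 *
 *      //Build a trivial SOURCE -> IPIN -> OPIN -> SINK path
 *      NodeId src  = tg.add_node(NodeType::SOURCE);
 *      NodeId ipin = tg.add_node(NodeType::IPIN);
 *      NodeId opin = tg.add_node(NodeType::OPIN);
 *      NodeId sink = tg.add_node(NodeType::SINK);
 *
 *      tg.add_edge(EdgeType::INTERCONNECT, src, ipin);
 *      tg.add_edge(EdgeType::PRIMITIVE_COMBINATIONAL, ipin, opin);
 *      tg.add_edge(EdgeType::INTERCONNECT, opin, sink);
 *
 *      tg.levelize(); //Required before the level/primary-input/logical-output accessors
 *
 *      for(LevelId level : tg.levels()) {
 *          for(NodeId node : tg.level_nodes(level)) {
 *              //Nodes are visited in topological order
 *          }
 *      }
 */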
//Returns the set of nodes (Strongly Connected Components) that form loops in the timing graph
std::vector<std::vector<NodeId>> identify_combinational_loops(const TimingGraph& tg);
//Returns the set of nodes transitively connected (either fanin or fanout) to nodes in through_nodes
//up to max_depth (default infinite) hops away
std::vector<NodeId> find_transitively_connected_nodes(const TimingGraph& tg,
const std::vector<NodeId> through_nodes,
size_t max_depth=std::numeric_limits<size_t>::max());
//Returns the set of nodes in the transitive fanin of nodes in sinks up to max_depth (default infinite) hops away
std::vector<NodeId> find_transitive_fanin_nodes(const TimingGraph& tg,
const std::vector<NodeId> sinks,
size_t max_depth=std::numeric_limits<size_t>::max());
//Returns the set of nodes in the transitive fanout of nodes in sources up to max_depth (default infinite) hops away
std::vector<NodeId> find_transitive_fanout_nodes(const TimingGraph& tg,
const std::vector<NodeId> sources,
size_t max_depth=std::numeric_limits<size_t>::max());
EdgeType infer_edge_type(const TimingGraph& tg, EdgeId edge);
//Mappings from old to new IDs
struct GraphIdMaps {
GraphIdMaps(tatum::util::linear_map<NodeId,NodeId> node_map,
tatum::util::linear_map<EdgeId,EdgeId> edge_map)
: node_id_map(node_map), edge_id_map(edge_map) {}
tatum::util::linear_map<NodeId,NodeId> node_id_map;
tatum::util::linear_map<EdgeId,EdgeId> edge_id_map;
};
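/*
 * Sketch of remapping an externally held ID after compress() (the 'old_node'
 * variable is a placeholder assumption):
 *
 *      GraphIdMaps id_maps = tg.compress();
 *      NodeId new_node = id_maps.node_id_map[old_node];
 */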
} //namespace

View File

@ -0,0 +1,59 @@
#pragma once
/*
* Forward declarations for Timing Graph and related types
*/
#include <iosfwd>
#include "tatum/util/tatum_strong_id.hpp"
namespace tatum {
//The timing graph
class TimingGraph;
struct GraphIdMaps;
/**
* Potential types for nodes in the timing graph
*/
enum class NodeType : unsigned char {
SOURCE, //The start of a clock/data path
SINK, //The end of a clock/data path
IPIN, //An intermediate input pin
OPIN, //An intermediate output pin
CPIN, //An intermediate clock (input) pin
};
/**
 * Potential types for edges in the timing graph
 */
enum class EdgeType : unsigned char {
PRIMITIVE_COMBINATIONAL,
PRIMITIVE_CLOCK_LAUNCH,
PRIMITIVE_CLOCK_CAPTURE,
INTERCONNECT
};
//Stream operators for Edge/Node Type
std::ostream& operator<<(std::ostream& os, const NodeType type);
std::ostream& operator<<(std::ostream& os, const EdgeType type);
//Various IDs used by the timing graph
struct node_id_tag;
struct edge_id_tag;
struct domain_id_tag;
struct level_id_tag;
typedef tatum::util::StrongId<node_id_tag> NodeId;
typedef tatum::util::StrongId<edge_id_tag> EdgeId;
typedef tatum::util::StrongId<level_id_tag> LevelId;
//We expect far fewer domains than nodes/edges so we use a smaller
//data type, as this allows for more efficient packing in TimingTag
typedef tatum::util::StrongId<domain_id_tag,unsigned short> DomainId;
std::ostream& operator<<(std::ostream& os, NodeId node_id);
std::ostream& operator<<(std::ostream& os, EdgeId edge_id);
std::ostream& operator<<(std::ostream& os, DomainId domain_id);
std::ostream& operator<<(std::ostream& os, LevelId level_id);
} //namespace

View File

@ -0,0 +1,36 @@
#ifndef TATUM_TIMING_GRAPH_NAME_RESOLVER_HPP
#define TATUM_TIMING_GRAPH_NAME_RESOLVER_HPP
#include <string>
#include <vector>
#include "TimingGraphFwd.hpp"
#include "tatum/base/DelayType.hpp"
#include "tatum/Time.hpp"
namespace tatum {
struct DelayComponent {
std::string type_name; //Type of element
std::string inst_name; //Unique identifier of element
Time delay; //Associated delay of element
};
struct EdgeDelayBreakdown {
std::vector<DelayComponent> components;
};
//Abstract interface for resolving timing graph nodes to human-readable strings
class TimingGraphNameResolver {
public:
virtual ~TimingGraphNameResolver() = default;
virtual std::string node_name(tatum::NodeId node) const = 0;
virtual std::string node_type_name(tatum::NodeId node) const = 0;
virtual EdgeDelayBreakdown edge_delay_breakdown(tatum::EdgeId /*edge*/, DelayType /*delay_type*/) const {
//Default no edge delay breakdown
return EdgeDelayBreakdown();
}
};
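/*
 * Minimal resolver sketch (illustrative only; the SimpleNameResolver class and
 * its naming scheme are assumptions, not part of the library):
 *
 *      class SimpleNameResolver : public tatum::TimingGraphNameResolver {
 *          public:
 *              std::string node_name(tatum::NodeId node) const override {
 *                  return "node_" + std::to_string(size_t(node));
 *              }
 *              std::string node_type_name(tatum::NodeId node) const override {
 *                  return "unnamed"; //A real resolver would report the pin/block type
 *              }
 *      };
 *
 * The inherited edge_delay_breakdown() default means reports will simply omit
 * per-edge delay breakdowns.
 */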
} //namespace
#endif

View File

@ -0,0 +1,710 @@
#include <map>
#include <fstream>
#include <sstream>
#include <iomanip>
#include <vector>
#include <algorithm>
#include <cmath>
#include "tatum/util/tatum_math.hpp"
#include "tatum/util/OsFormatGuard.hpp"
#include "tatum/error.hpp"
#include "tatum/TimingReporter.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
namespace tatum { namespace detail {
float convert_to_printable_units(float value, float unit_scale) {
return value / unit_scale;
}
std::string to_printable_string(tatum::Time val, float unit_scale, size_t precision) {
std::stringstream ss;
if(!std::signbit(val.value())) ss << " "; //Pad positive values so they align with negative values prefixed with '-'
ss << std::fixed << std::setprecision(precision) << convert_to_printable_units(val.value(), unit_scale);
return ss.str();
}
void ReportTimingPathHelper::update_print_path(std::ostream& os, std::string point, tatum::Time path) {
tatum::Time incr = path - prev_path_;
print_path_line(os, point, to_printable_string(incr, unit_scale_, precision_), to_printable_string(path, unit_scale_, precision_));
prev_path_ = path;
}
void ReportTimingPathHelper::update_print_path_no_incr(std::ostream& os, std::string point, tatum::Time path) {
print_path_line(os, point, "", to_printable_string(path, unit_scale_, precision_));
prev_path_ = path;
}
void ReportTimingPathHelper::reset_path() {
prev_path_ = tatum::Time(0.);
}
void ReportTimingPathHelper::print_path_line_no_incr(std::ostream& os, std::string point, tatum::Time path) const {
print_path_line(os, point, "", to_printable_string(path, unit_scale_, precision_));
}
void ReportTimingPathHelper::print_path_line(std::ostream& os, std::string point, std::string incr, std::string path) const {
os << std::setw(point_width_) << std::left << point;
os << std::setw(incr_width_) << std::right << incr;
os << std::setw(path_width_) << std::right << path;
os << "\n";
}
void ReportTimingPathHelper::print_divider(std::ostream& os) const {
size_t cnt = point_width_ + incr_width_ + path_width_;
for(size_t i = 0; i < cnt; ++i) {
os << "-";
}
os << "\n";
}
}} //namespace
namespace tatum {
/*
* Public member functions
*/
TimingReporter::TimingReporter(const TimingGraphNameResolver& name_resolver,
const TimingGraph& timing_graph,
const TimingConstraints& timing_constraints,
float unit_scale,
size_t precision)
: name_resolver_(name_resolver)
, timing_graph_(timing_graph)
, timing_constraints_(timing_constraints)
, unit_scale_(unit_scale)
, precision_(precision) {
//pass
}
void TimingReporter::report_timing_setup(std::string filename,
const SetupTimingAnalyzer& setup_analyzer,
size_t npaths) const {
std::ofstream os(filename);
report_timing_setup(os, setup_analyzer, npaths);
}
void TimingReporter::report_timing_setup(std::ostream& os,
const SetupTimingAnalyzer& setup_analyzer,
size_t npaths) const {
auto paths = path_collector_.collect_worst_setup_timing_paths(timing_graph_, setup_analyzer, npaths);
report_timing(os, paths);
}
void TimingReporter::report_timing_hold(std::string filename,
const HoldTimingAnalyzer& hold_analyzer,
size_t npaths) const {
std::ofstream os(filename);
report_timing_hold(os, hold_analyzer, npaths);
}
void TimingReporter::report_timing_hold(std::ostream& os,
const HoldTimingAnalyzer& hold_analyzer,
size_t npaths) const {
auto paths = path_collector_.collect_worst_hold_timing_paths(timing_graph_, hold_analyzer, npaths);
report_timing(os, paths);
}
void TimingReporter::report_skew_setup(std::string filename,
const SetupTimingAnalyzer& setup_analyzer,
size_t nworst) const {
std::ofstream os(filename);
report_skew_setup(os, setup_analyzer, nworst);
}
void TimingReporter::report_skew_setup(std::ostream& os,
const SetupTimingAnalyzer& setup_analyzer,
size_t nworst) const {
auto paths = path_collector_.collect_worst_setup_skew_paths(timing_graph_, timing_constraints_, setup_analyzer, nworst);
os << "#Clock skew for setup timing startpoint/endpoint\n";
os << "\n";
report_skew(os, paths, TimingType::SETUP);
os << "#End of clock skew for setup timing startpoint/endpoint report\n";
}
void TimingReporter::report_skew_hold(std::string filename,
const HoldTimingAnalyzer& hold_analyzer,
size_t nworst) const {
std::ofstream os(filename);
report_skew_hold(os, hold_analyzer, nworst);
}
void TimingReporter::report_skew_hold(std::ostream& os,
const HoldTimingAnalyzer& hold_analyzer,
size_t nworst) const {
auto paths = path_collector_.collect_worst_hold_skew_paths(timing_graph_, timing_constraints_, hold_analyzer, nworst);
os << "#Clock skew for hold timing startpoint/endpoint\n";
os << "\n";
report_skew(os, paths, TimingType::HOLD);
os << "#End of clock skew for hold timing startpoint/endpoint report\n";
}
void TimingReporter::report_unconstrained_setup(std::string filename,
const tatum::SetupTimingAnalyzer& setup_analyzer) const {
std::ofstream os(filename);
report_unconstrained_setup(os, setup_analyzer);
}
void TimingReporter::report_unconstrained_setup(std::ostream& os,
const tatum::SetupTimingAnalyzer& setup_analyzer) const {
detail::SetupTagRetriever tag_retriever(setup_analyzer);
os << "#Unconstrained setup timing startpoint/endpoint\n";
os << "\n";
os << "timing_node_id node_type node_name\n";
os << "-------------- --------- ---------\n";
report_unconstrained(os, NodeType::SOURCE, tag_retriever);
report_unconstrained(os, NodeType::SINK, tag_retriever);
os << "\n";
os << "#End of unconstrained setup startpoint/endpoint report\n";
}
void TimingReporter::report_unconstrained_hold(std::string filename,
const tatum::HoldTimingAnalyzer& hold_analyzer) const {
std::ofstream os(filename);
report_unconstrained_hold(os, hold_analyzer);
}
void TimingReporter::report_unconstrained_hold(std::ostream& os,
const tatum::HoldTimingAnalyzer& hold_analyzer) const {
detail::HoldTagRetriever tag_retriever(hold_analyzer);
os << "#Unconstrained hold timing startpoint/endpoint\n";
os << "\n";
os << "timing_node_id node_type node_name\n";
os << "-------------- --------- ---------\n";
report_unconstrained(os, NodeType::SOURCE, tag_retriever);
report_unconstrained(os, NodeType::SINK, tag_retriever);
os << "\n";
os << "#End of unconstrained hold startpoint/endpoint report\n";
}
/*
* Private member functions
*/
void TimingReporter::report_timing(std::ostream& os,
const std::vector<TimingPath>& paths) const {
tatum::OsFormatGuard flag_guard(os);
os << "#Timing report of worst " << paths.size() << " path(s)\n";
os << "# Unit scale: " << std::setprecision(0) << std::scientific << unit_scale_ << " seconds\n";
os << "# Output precision: " << precision_ << "\n";
os << "\n";
size_t i = 0;
for(const auto& path : paths) {
os << "#Path " << ++i << "\n";
report_timing_path(os, path);
os << "\n";
}
os << "#End of timing report\n";
}
void TimingReporter::report_timing_path(std::ostream& os, const TimingPath& timing_path) const {
std::string divider = "--------------------------------------------------------------------------------";
TimingPathInfo path_info = timing_path.path_info();
os << "Startpoint: " << name_resolver_.node_name(path_info.startpoint())
<< " (" << name_resolver_.node_type_name(path_info.startpoint())
<< " clocked by " << timing_constraints_.clock_domain_name(path_info.launch_domain())
<< ")\n";
os << "Endpoint : " << name_resolver_.node_name(path_info.endpoint())
<< " (" << name_resolver_.node_type_name(path_info.endpoint())
<< " clocked by " << timing_constraints_.clock_domain_name(path_info.capture_domain())
<< ")\n";
if(path_info.type() == TimingType::SETUP) {
os << "Path Type : setup" << "\n";
} else {
TATUM_ASSERT_MSG(path_info.type() == TimingType::HOLD, "Expected path type SETUP or HOLD");
os << "Path Type : hold" << "\n";
}
os << "\n";
size_t point_print_width = estimate_point_print_width(timing_path);
//Helper to track path state and output formatting
detail::ReportTimingPathHelper path_helper(unit_scale_, precision_, point_print_width);
path_helper.print_path_line(os, "Point", " Incr", " Path");
path_helper.print_divider(os);
//Launch path
Time arr_time;
Time arr_path;
{
path_helper.reset_path();
arr_path = report_timing_clock_launch_subpath(os, path_helper, timing_path.clock_launch_path(), path_info.launch_domain(), path_info.type());
arr_path = report_timing_data_arrival_subpath(os, path_helper, timing_path.data_arrival_path(), path_info.launch_domain(), path_info.type(), arr_path);
{
//Final arrival time
const TimingPathElem& path_elem = *(--timing_path.data_arrival_path().elements().end());
TATUM_ASSERT(timing_graph_.node_type(path_elem.node()) == NodeType::SINK);
TATUM_ASSERT(path_elem.tag().type() == TagType::DATA_ARRIVAL);
arr_time = path_elem.tag().time();
path_helper.update_print_path_no_incr(os, "data arrival time", arr_time);
os << "\n";
}
//Sanity check that the arrival time calculated by this timing report (i.e. arr_path) and the one
//calculated by the analyzer (i.e. arr_time) agree
if(!nearly_equal(arr_time, arr_path)) {
os.flush();
std::stringstream ss;
ss << "Internal Error: analyzer arrival time (" << arr_time.value() << ")"
<< " differs from timing report path arrival time (" << arr_path.value() << ")"
<< " beyond tolerance";
throw tatum::Error(ss.str());
}
}
//Capture path (required time)
Time req_time;
Time req_path;
{
path_helper.reset_path();
req_path = report_timing_clock_capture_subpath(os, path_helper, timing_path.clock_capture_path(),
path_info.launch_domain(), path_info.capture_domain(),
path_info.endpoint(),
path_info.type());
const TimingPathElem& path_elem = timing_path.data_required_element();
req_path = report_timing_data_required_element(os, path_helper, path_elem,
path_info.capture_domain(), path_info.type(),
req_path);
//Final required time
req_time = path_elem.tag().time();
path_helper.update_print_path_no_incr(os, "data required time", req_time);
//Sanity check required time
if(!nearly_equal(req_time, req_path)) {
os.flush();
std::stringstream ss;
ss << "Internal Error: analyzer required time (" << req_time.value() << ")"
<< " differs from report_timing path required time (" << req_path.value() << ")"
<< " beyond tolerance";
throw tatum::Error(ss.str());
}
}
//Summary and slack
path_helper.print_divider(os);
if (path_info.type() == TimingType::SETUP) {
path_helper.print_path_line_no_incr(os, "data required time", req_time);
path_helper.print_path_line_no_incr(os, "data arrival time", -arr_time);
} else {
TATUM_ASSERT(path_info.type() == TimingType::HOLD);
path_helper.print_path_line_no_incr(os, "data required time", -req_time);
path_helper.print_path_line_no_incr(os, "data arrival time", arr_time);
}
path_helper.print_divider(os);
Time slack = timing_path.slack_tag().time();
if(slack.value() < 0. || std::signbit(slack.value())) {
path_helper.print_path_line_no_incr(os, "slack (VIOLATED)", slack);
} else {
path_helper.print_path_line_no_incr(os, "slack (MET)", slack);
}
os << "\n";
os.flush();
//Sanity check slack
Time path_slack;
if (path_info.type() == TimingType::SETUP) {
path_slack = req_path - arr_path;
} else {
TATUM_ASSERT(path_info.type() == TimingType::HOLD);
path_slack = arr_path - req_path;
}
if(!nearly_equal(slack, path_slack)) {
os.flush();
std::stringstream ss;
ss << "Internal Error: analyzer slack (" << slack << ")"
<< " differs from report_timing path slack (" << path_slack << ")"
<< " beyond tolerance";
throw tatum::Error(ss.str());
}
}
void TimingReporter::report_unconstrained(std::ostream& os, const NodeType type, const detail::TagRetriever& tag_retriever) const {
for(NodeId node : timing_graph_.nodes()) {
NodeType node_type = timing_graph_.node_type(node);
if(node_type == type) {
auto tags = tag_retriever.tags(node);
if(!is_constrained(node_type, tags)) {
os << size_t(node)
<< " " << name_resolver_.node_type_name(node)
<< " " << name_resolver_.node_name(node)
<< "\n";
}
}
}
}
void TimingReporter::report_skew(std::ostream& os, const std::vector<SkewPath>& skew_paths, TimingType timing_type) const {
tatum::OsFormatGuard flag_guard(os);
int i = 1;
for (const auto& skew_path : skew_paths) {
os << "#Skew Path " << i << "\n";
report_skew_path(os, skew_path, timing_type);
os << "\n";
++i;
}
}
void TimingReporter::report_skew_path(std::ostream& os, const SkewPath& skew_path, TimingType timing_type) const {
auto& launch_path = skew_path.clock_launch_path;
auto& capture_path = skew_path.clock_capture_path;
NodeId launch_node = skew_path.data_launch_node;
NodeId capture_node = skew_path.data_capture_node;
os << "Startpoint: " << name_resolver_.node_name(launch_node)
<< " (" << name_resolver_.node_type_name(launch_node)
<< " clocked by " << timing_constraints_.clock_domain_name(skew_path.launch_domain)
<< ")\n";
os << "Endpoint : " << name_resolver_.node_name(capture_node)
<< " (" << name_resolver_.node_type_name(capture_node)
<< " clocked by " << timing_constraints_.clock_domain_name(skew_path.capture_domain)
<< ")\n";
if(timing_type == TimingType::SETUP) {
os << "Path Type : setup" << "\n";
} else {
TATUM_ASSERT_MSG(timing_type == TimingType::HOLD, "Expected path type SETUP or HOLD");
os << "Path Type : hold" << "\n";
}
os << "\n";
std::string launch_name = name_resolver_.node_name(launch_node)
+ " ("
+ name_resolver_.node_type_name(launch_node)
+ ")";
std::string capture_name = name_resolver_.node_name(capture_node)
+ " ("
+ name_resolver_.node_type_name(capture_node)
+ ")";
size_t point_print_width = std::max(launch_name.size(), capture_name.size());
point_print_width = std::max(point_print_width, std::string("clock data capture (normalized)").size());
detail::ReportTimingPathHelper path_helper(unit_scale_, precision_, point_print_width);
path_helper.print_path_line(os, "Point", " Incr", " Path");
path_helper.print_divider(os);
Time data_launch_time = report_timing_clock_launch_subpath(os, path_helper, launch_path, skew_path.launch_domain, timing_type);
TATUM_ASSERT(nearly_equal(data_launch_time, skew_path.clock_launch_arrival));
path_helper.update_print_path_no_incr(os, "data launch", data_launch_time);
os << "\n";
os << "\n";
path_helper.reset_path();
Time data_capture_time = report_timing_clock_capture_subpath(os, path_helper, capture_path, skew_path.launch_domain, skew_path.capture_domain, skew_path.data_capture_node, timing_type);
TATUM_ASSERT(nearly_equal(data_capture_time, skew_path.clock_capture_arrival));
path_helper.update_print_path_no_incr(os, "data capture", data_capture_time);
path_helper.print_divider(os);
Time clock_constraint;
if (timing_type == TimingType::SETUP) {
clock_constraint = timing_constraints_.setup_constraint(skew_path.launch_domain, skew_path.capture_domain);
} else {
clock_constraint = timing_constraints_.hold_constraint(skew_path.launch_domain, skew_path.capture_domain);
}
path_helper.print_path_line_no_incr(os, "data capture", data_capture_time);
path_helper.print_path_line_no_incr(os, "clock constraint", -clock_constraint);
path_helper.print_path_line_no_incr(os, "data launch", -data_launch_time);
path_helper.print_divider(os);
Time skew = data_capture_time - clock_constraint - data_launch_time;
TATUM_ASSERT(nearly_equal(skew, skew_path.clock_skew));
path_helper.print_path_line_no_incr(os, "skew", skew);
}
Time TimingReporter::report_timing_clock_launch_subpath(std::ostream& os,
detail::ReportTimingPathHelper& path_helper,
const TimingSubPath& subpath,
DomainId domain,
TimingType timing_type) const {
Time path(0.);
{
//Launch clock origin
path += Time(0.);
std::string point = "clock " + timing_constraints_.clock_domain_name(domain) + " (rise edge)";
path_helper.update_print_path(os, point, path);
}
return report_timing_clock_subpath(os, path_helper, subpath, domain, timing_type, path, Time(0.));
}
Time TimingReporter::report_timing_clock_capture_subpath(std::ostream& os,
detail::ReportTimingPathHelper& path_helper,
const TimingSubPath& subpath,
DomainId launch_domain,
DomainId capture_domain,
NodeId capture_node,
TimingType timing_type) const {
Time path(0.);
Time offset(0.); //Offset required since the clock launch tag doesn't have the actual (potentially
//capture node dependent) constraint annotated. We determine this up front by
//looking up the actual constraint and adding it into the printed subpath as an offset.
//Note that the required time 'path' values are correct; it is only the increments
//which need to be offset when being reported (with the exception of the original
//rising edge).
{
//Launch clock origin
if (timing_type == TimingType::SETUP) {
path = timing_constraints_.setup_constraint(launch_domain, capture_domain, capture_node);
offset = path - timing_constraints_.setup_constraint(launch_domain, capture_domain);
} else {
TATUM_ASSERT(timing_type == TimingType::HOLD);
path = timing_constraints_.hold_constraint(launch_domain, capture_domain, capture_node);
offset = path - timing_constraints_.hold_constraint(launch_domain, capture_domain);
}
//os << "[offset]" << tatum::detail::to_printable_string(offset, 1e-9,3) << "\n";
std::string point = "clock " + timing_constraints_.clock_domain_name(capture_domain) + " (rise edge)";
path_helper.update_print_path(os, point, path);
}
path = report_timing_clock_subpath(os, path_helper, subpath, capture_domain, timing_type, path, offset);
{
//Uncertainty
Time uncertainty;
if(timing_type == TimingType::SETUP) {
uncertainty = -Time(timing_constraints_.setup_clock_uncertainty(launch_domain, capture_domain));
} else {
TATUM_ASSERT(timing_type == TimingType::HOLD);
uncertainty = Time(timing_constraints_.hold_clock_uncertainty(launch_domain, capture_domain));
}
path += uncertainty;
path_helper.update_print_path(os, "clock uncertainty", path + offset);
}
return path;
}
Time TimingReporter::report_timing_clock_subpath(std::ostream& os,
detail::ReportTimingPathHelper& path_helper,
const TimingSubPath& subpath,
DomainId domain,
TimingType timing_type,
Time path,
Time offset) const {
{
//Clock source latency
Time latency;
if (timing_type == TimingType::SETUP) {
//Setup clock launches late
latency = Time(timing_constraints_.source_latency(domain, ArrivalType::LATE));
} else {
TATUM_ASSERT(timing_type == TimingType::HOLD);
//Hold clock launches early
latency = Time(timing_constraints_.source_latency(domain, ArrivalType::EARLY));
}
path += latency;
std::string point = "clock source latency";
path_helper.update_print_path(os, point, path);
}
DelayType delay_type;
if (timing_type == TimingType::SETUP) {
delay_type = DelayType::MAX;
} else {
delay_type = DelayType::MIN;
}
//Clock network path
for(const TimingPathElem& path_elem : subpath.elements()) {
//Ask the application for a detailed breakdown of the edge delays
auto delay_breakdown = name_resolver_.edge_delay_breakdown(path_elem.incomming_edge(), delay_type);
if (!delay_breakdown.components.empty()) {
//Application provided detailed delay breakdown of edge delay, report it
for (auto& delay_component : delay_breakdown.components) {
std::string point = "|";
if (!delay_component.inst_name.empty()) {
point += " " + delay_component.inst_name;
}
if (!delay_component.type_name.empty()) {
point += " (" + delay_component.type_name + ")";
}
path += delay_component.delay;
path_helper.update_print_path(os, point, path);
}
TATUM_ASSERT_MSG(nearly_equal(path, path_elem.tag().time()), "Delay breakdown must match calculated delay");
}
std::string point = name_resolver_.node_name(path_elem.node()) + " (" + name_resolver_.node_type_name(path_elem.node()) + ")";
path = path_elem.tag().time();
path_helper.update_print_path(os, point, path + offset);
}
return path;
}
Time TimingReporter::report_timing_data_arrival_subpath(std::ostream& os,
detail::ReportTimingPathHelper& path_helper,
const TimingSubPath& subpath,
DomainId domain,
TimingType timing_type,
Time path) const {
{
//Input constraint
TATUM_ASSERT(subpath.elements().size() > 0);
const TimingPathElem& path_elem = *(subpath.elements().begin());
Time input_constraint;
if (timing_type == TimingType::SETUP) {
input_constraint = timing_constraints_.input_constraint(path_elem.node(), domain, DelayType::MAX);
} else {
TATUM_ASSERT(timing_type == TimingType::HOLD);
input_constraint = timing_constraints_.input_constraint(path_elem.node(), domain, DelayType::MIN);
}
if(input_constraint.valid()) {
path += Time(input_constraint);
path_helper.update_print_path(os, "input external delay", path);
}
}
DelayType delay_type;
if (timing_type == TimingType::SETUP) {
delay_type = DelayType::MAX;
} else {
delay_type = DelayType::MIN;
}
//Launch data
for(const TimingPathElem& path_elem : subpath.elements()) {
//Ask the application for a detailed breakdown of the edge delays
auto delay_breakdown = name_resolver_.edge_delay_breakdown(path_elem.incomming_edge(), delay_type);
if (!delay_breakdown.components.empty()) {
//Application provided detailed delay breakdown of edge delay, report it
for (auto& delay_component : delay_breakdown.components) {
std::string point = "|";
if (!delay_component.inst_name.empty()) {
point += " " + delay_component.inst_name;
}
if (!delay_component.type_name.empty()) {
point += " (" + delay_component.type_name + ")";
}
path += delay_component.delay;
path_helper.update_print_path(os, point, path);
}
TATUM_ASSERT_MSG(nearly_equal(path, path_elem.tag().time()), "Delay breakdown must match calculated delay");
}
std::string point = name_resolver_.node_name(path_elem.node()) + " (" + name_resolver_.node_type_name(path_elem.node()) + ")";
EdgeId in_edge = path_elem.incomming_edge();
if(in_edge && timing_graph_.edge_type(in_edge) == EdgeType::PRIMITIVE_CLOCK_LAUNCH) {
point += " [clock-to-output]";
}
path = path_elem.tag().time();
path_helper.update_print_path(os, point, path);
}
return path;
}
Time TimingReporter::report_timing_data_required_element(std::ostream& os,
detail::ReportTimingPathHelper& path_helper,
const TimingPathElem& data_required_elem,
DomainId capture_domain,
TimingType timing_type,
Time path) const {
{
TATUM_ASSERT(timing_graph_.node_type(data_required_elem.node()) == NodeType::SINK);
//Setup/hold time
EdgeId in_edge = data_required_elem.incomming_edge();
if(in_edge && timing_graph_.edge_type(in_edge) == EdgeType::PRIMITIVE_CLOCK_CAPTURE) {
std::string point;
if(timing_type == TimingType::SETUP) {
point = "cell setup time";
} else {
TATUM_ASSERT_MSG(timing_type == TimingType::HOLD, "Expected path type SETUP or HOLD");
point = "cell hold time";
}
path = data_required_elem.tag().time();
path_helper.update_print_path(os, point, path);
}
//Output constraint
Time output_constraint;
if (timing_type == TimingType::SETUP) {
output_constraint = timing_constraints_.output_constraint(data_required_elem.node(), capture_domain, DelayType::MAX);
} else {
TATUM_ASSERT(timing_type == TimingType::HOLD);
output_constraint = timing_constraints_.output_constraint(data_required_elem.node(), capture_domain, DelayType::MIN);
}
if(output_constraint.valid()) {
path += -Time(output_constraint);
path_helper.update_print_path(os, "output external delay", path);
}
}
return path;
}
bool TimingReporter::nearly_equal(const Time& lhs, const Time& rhs) const {
return tatum::util::nearly_equal(lhs.value(), rhs.value(), absolute_error_tolerance_, relative_error_tolerance_);
}
size_t TimingReporter::estimate_point_print_width(const TimingPath& path) const {
size_t width = 60; //default
for(auto subpath : {path.clock_launch_path(), path.data_arrival_path(), path.clock_capture_path()}) {
for(auto elem : subpath.elements()) {
//Take the longest typical point name
std::string point = name_resolver_.node_name(elem.node()) + " (" + name_resolver_.node_type_name(elem.node()) + ")";
point += " [clock-to-output]";
//Keep the max over all points
width = std::max(width, point.size());
}
}
return width;
}
} //namespace tatum

View File

@ -0,0 +1,162 @@
#ifndef TATUM_TIMING_REPORTER_HPP
#define TATUM_TIMING_REPORTER_HPP
#include <iosfwd>
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/TimingConstraintsFwd.hpp"
#include "tatum/timing_analyzers.hpp"
#include "tatum/TimingGraphNameResolver.hpp"
#include "tatum/report/TimingPath.hpp"
#include "tatum/report/TimingPathCollector.hpp"
#include "tatum/report/TimingReportTagRetriever.hpp"
namespace tatum { namespace detail {
float convert_to_printable_units(float value, float unit_scale);
std::string to_printable_string(tatum::Time val, float unit_scale, size_t precision);
//Helper class to track path state and formatting while writing timing path reports
class ReportTimingPathHelper {
public:
ReportTimingPathHelper(float unit_scale, size_t precision, size_t point_width=60, size_t incr_width=10, size_t path_width=10)
: unit_scale_(unit_scale)
, precision_(precision)
, point_width_(point_width)
, incr_width_(incr_width)
, path_width_(path_width) {}
void update_print_path(std::ostream& os, std::string point, tatum::Time path);
void update_print_path_no_incr(std::ostream& os, std::string point, tatum::Time path);
void reset_path();
void print_path_line_no_incr(std::ostream& os, std::string point, tatum::Time path) const;
void print_path_line(std::ostream& os, std::string point, std::string incr, std::string path) const;
void print_divider(std::ostream& os) const;
private:
float unit_scale_;
size_t precision_;
size_t point_width_;
size_t incr_width_;
size_t path_width_;
tatum::Time prev_path_ = tatum::Time(0.);
};
}} //namespace
namespace tatum {
constexpr size_t REPORT_TIMING_DEFAULT_NPATHS=100;
//A class for generating timing reports
class TimingReporter {
public:
TimingReporter(const TimingGraphNameResolver& name_resolver,
const tatum::TimingGraph& timing_graph,
const tatum::TimingConstraints& timing_constraints,
float unit_scale=1e-9,
size_t precision=3);
public:
void report_timing_setup(std::string filename, const tatum::SetupTimingAnalyzer& setup_analyzer, size_t npaths=REPORT_TIMING_DEFAULT_NPATHS) const;
void report_timing_setup(std::ostream& os, const tatum::SetupTimingAnalyzer& setup_analyzer, size_t npaths=REPORT_TIMING_DEFAULT_NPATHS) const;
void report_timing_hold(std::string filename, const tatum::HoldTimingAnalyzer& hold_analyzer, size_t npaths=REPORT_TIMING_DEFAULT_NPATHS) const;
void report_timing_hold(std::ostream& os, const tatum::HoldTimingAnalyzer& hold_analyzer, size_t npaths=REPORT_TIMING_DEFAULT_NPATHS) const;
void report_skew_setup(std::string filename, const tatum::SetupTimingAnalyzer& setup_analyzer, size_t nworst=REPORT_TIMING_DEFAULT_NPATHS) const;
void report_skew_setup(std::ostream& os, const tatum::SetupTimingAnalyzer& setup_analyzer, size_t nworst=REPORT_TIMING_DEFAULT_NPATHS) const;
void report_skew_hold(std::string filename, const tatum::HoldTimingAnalyzer& hold_analyzer, size_t nworst=REPORT_TIMING_DEFAULT_NPATHS) const;
void report_skew_hold(std::ostream& os, const tatum::HoldTimingAnalyzer& hold_analyzer, size_t nworst=REPORT_TIMING_DEFAULT_NPATHS) const;
void report_unconstrained_setup(std::string filename, const tatum::SetupTimingAnalyzer& setup_analyzer) const;
void report_unconstrained_setup(std::ostream& os, const tatum::SetupTimingAnalyzer& setup_analyzer) const;
void report_unconstrained_hold(std::string filename, const tatum::HoldTimingAnalyzer& hold_analyzer) const;
void report_unconstrained_hold(std::ostream& os, const tatum::HoldTimingAnalyzer& hold_analyzer) const;
private:
struct PathSkew {
NodeId launch_node;
NodeId capture_node;
DomainId launch_domain;
DomainId capture_domain;
Time clock_launch;
Time clock_capture;
Time clock_capture_normalized;
Time clock_constraint;
Time clock_skew;
};
private:
void report_timing(std::ostream& os, const std::vector<TimingPath>& paths) const;
void report_timing_path(std::ostream& os, const TimingPath& path) const;
void report_unconstrained(std::ostream& os, const NodeType type, const detail::TagRetriever& tag_retriever) const;
void report_skew(std::ostream& os, const std::vector<SkewPath>& paths, TimingType timing_type) const;
void report_skew_path(std::ostream& os, const SkewPath& skew_path, TimingType timing_type) const;
Time report_timing_clock_launch_subpath(std::ostream& os,
detail::ReportTimingPathHelper& path_helper,
const TimingSubPath& subpath,
DomainId domain,
TimingType timing_type) const;
Time report_timing_clock_capture_subpath(std::ostream& os,
detail::ReportTimingPathHelper& path_helper,
const TimingSubPath& subpath,
DomainId launch_domain,
DomainId capture_domain,
NodeId capture_node,
TimingType timing_type) const;
Time report_timing_data_arrival_subpath(std::ostream& os,
detail::ReportTimingPathHelper& path_helper,
const TimingSubPath& subpath,
DomainId domain,
TimingType timing_type,
Time path) const;
Time report_timing_data_required_element(std::ostream& os,
detail::ReportTimingPathHelper& path_helper,
const TimingPathElem& data_required_elem,
DomainId capture_domain,
TimingType timing_type,
Time path) const;
//Reports clock latency and path (caller should handle rising edge)
Time report_timing_clock_subpath(std::ostream& os,
detail::ReportTimingPathHelper& path_helper,
const TimingSubPath& subpath,
DomainId domain,
TimingType timing_type,
Time path,
Time offset) const;
bool nearly_equal(const tatum::Time& lhs, const tatum::Time& rhs) const;
size_t estimate_point_print_width(const TimingPath& path) const;
private:
const TimingGraphNameResolver& name_resolver_;
const TimingGraph& timing_graph_;
const TimingConstraints& timing_constraints_;
float unit_scale_ = 1e-9;
size_t precision_ = 3;
float relative_error_tolerance_ = 1.e-5;
float absolute_error_tolerance_ = 1e-13; //Sub pico-second
TimingPathCollector path_collector_;
};
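/*
 * Example usage sketch (the name_resolver, setup_analyzer and file names below
 * are assumptions):
 *
 *      TimingReporter reporter(name_resolver, timing_graph, timing_constraints);
 *      reporter.report_timing_setup("report_timing.setup.rpt", *setup_analyzer);
 *      reporter.report_unconstrained_setup("report_unconstrained_timing.setup.rpt", *setup_analyzer);
 */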
} //namespace
#endif

View File

@ -0,0 +1,10 @@
#ifndef TATUM_TIMING_REPORTER_FWD_HPP
#define TATUM_TIMING_REPORTER_FWD_HPP
namespace tatum {
//A class for generating timing reports
class TimingReporter;
} //namespace
#endif

View File

@ -0,0 +1,134 @@
#ifndef TATUM_ANALYZER_FACTORY_HPP
#define TATUM_ANALYZER_FACTORY_HPP
#include <memory>
#include "analyzer_factory_fwd.hpp"
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/TimingConstraintsFwd.hpp"
#include "tatum/graph_walkers.hpp"
#include "tatum/timing_analyzers.hpp"
#include "tatum/analyzers/full_timing_analyzers.hpp"
namespace tatum {
/** \file
* This file defines the AnalyzerFactory class used to construct timing analyzers.
*
* We assume that the user has already defined the timing graph, constraints and
 * their own delay calculator:
*
* TimingGraph timing_graph;
* TimingConstraints timing_constraints;
* MyDelayCalculator delay_calculator;
*
* //... code that initializes the graph, constraints and delay calculator
*
* We can now create the analyzer. For example, if we wanted to perform a setup
* analysis:
*
* auto setup_analyzer = AnalyzerFactory<SetupAnalysis>::make(timing_graph,
* timing_constraints,
* delay_calculator);
*
 * We can similarly generate analyzers for other types of analysis, for instance Hold:
 *
 *      auto hold_analyzer = AnalyzerFactory<HoldAnalysis>::make(timing_graph,
* timing_constraints,
* delay_calculator);
*
* We can also build a parallel analyzer (instead of the serial default):
*
* auto parallel_setup_analyzer = AnalyzerFactory<SetupAnalysis,ParallelWalker>::make(timing_graph,
* timing_constraints,
* delay_calculator);
*
 * The AnalyzerFactory returns a std::unique_ptr to the appropriate TimingAnalyzer sub-class:
*
* SetupAnalysis => SetupTimingAnalyzer
* HoldAnalysis => HoldTimingAnalyzer
* SetupHoldAnalysis => SetupHoldTimingAnalyzer
*/
///Factory class to construct timing analyzers
///
///\tparam Visitor The analysis type visitor (e.g. SetupAnalysis)
///\tparam GraphWalker The graph walker to use (defaults to serial traversals)
template<class Visitor,
class GraphWalker>
struct AnalyzerFactory {
//We use the dependent_false template to detect if the unspecialized AnalyzerFactory
//template is being instantiated
template<typename T>
struct dependent_false : std::false_type {};
//Error if the unspecialized template is instantiated
static_assert(dependent_false<Visitor>::value,
"Must specify a specialized analysis visitor type (e.g. SetupAnalysis, HoldAnalysis, SetupHoldAnalysis)");
//We provide the function definition to avoid spurious errors when the
//unspecialized template is instantiated
///Builds a timing analyzer
///
///\param timing_graph The timing graph to associate with the analyzer
///\param timing_constraints The timing constraints to associate with the analyzer
///\param delay_calc The edge delay calculator to use. Note that this is a custom user defined type,
/// but it must satisfy the interface defined by DelayCalculator (\see DelayCalculator)
///
///\returns std::unique_ptr to the analyzer
static std::unique_ptr<TimingAnalyzer> make(const TimingGraph& timing_graph,
const TimingConstraints& timing_constraints,
const DelayCalculator& delay_calc);
};
//Specialize for setup
template<class GraphWalker>
struct AnalyzerFactory<SetupAnalysis,GraphWalker> {
static std::unique_ptr<SetupTimingAnalyzer> make(const TimingGraph& timing_graph,
const TimingConstraints& timing_constraints,
const DelayCalculator& delay_calc) {
return std::unique_ptr<SetupTimingAnalyzer>(
new detail::FullSetupTimingAnalyzer<GraphWalker>(timing_graph,
timing_constraints,
delay_calc)
);
}
};
//Specialize for hold
template<class GraphWalker>
struct AnalyzerFactory<HoldAnalysis,GraphWalker> {
static std::unique_ptr<HoldTimingAnalyzer> make(const TimingGraph& timing_graph,
const TimingConstraints& timing_constraints,
const DelayCalculator& delay_calc) {
return std::unique_ptr<HoldTimingAnalyzer>(
new detail::FullHoldTimingAnalyzer<GraphWalker>(timing_graph,
timing_constraints,
delay_calc)
);
}
};
//Specialize for combined setup and hold
template<class GraphWalker>
struct AnalyzerFactory<SetupHoldAnalysis,GraphWalker> {
static std::unique_ptr<SetupHoldTimingAnalyzer> make(const TimingGraph& timing_graph,
const TimingConstraints& timing_constraints,
const DelayCalculator& delay_calc) {
return std::unique_ptr<SetupHoldTimingAnalyzer>(
new detail::FullSetupHoldTimingAnalyzer<GraphWalker>(timing_graph,
timing_constraints,
delay_calc)
);
}
};
} //namespace
#endif

View File

@ -0,0 +1,18 @@
#ifndef TATUM_ANALYZER_FACTORY_FWD_HPP
#define TATUM_ANALYZER_FACTORY_FWD_HPP
#include "graph_walkers_fwd.hpp"
namespace tatum {
///Factory class to construct timing analyzers
///
///\tparam Visitor The analysis type visitor (e.g. SetupAnalysis)
///\tparam GraphWalker The graph walker to use (defaults to serial traversals)
template<class Visitor,
class GraphWalker=SerialWalker>
struct AnalyzerFactory;
} //namespace
#endif

View File

@ -0,0 +1,80 @@
#pragma once
#include "tatum/graph_walkers/SerialWalker.hpp"
#include "tatum/HoldAnalysis.hpp"
#include "tatum/analyzers/HoldTimingAnalyzer.hpp"
#include "tatum/base/validate_timing_graph_constraints.hpp"
namespace tatum { namespace detail {
/**
* A concrete implementation of a HoldTimingAnalyzer.
*
* This is a full (i.e. non-incremental) analyzer, which fully
* re-analyzes the timing graph whenever update_timing_impl() is
* called.
*/
template<class GraphWalker=SerialWalker>
class FullHoldTimingAnalyzer : public HoldTimingAnalyzer {
public:
FullHoldTimingAnalyzer(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints, const DelayCalculator& delay_calculator)
: HoldTimingAnalyzer()
, timing_graph_(timing_graph)
, timing_constraints_(timing_constraints)
, delay_calculator_(delay_calculator)
, hold_visitor_(timing_graph_.nodes().size(), timing_graph_.edges().size()) {
validate_timing_graph_constraints(timing_graph_, timing_constraints_);
//Initialize profiling data
graph_walker_.set_profiling_data("total_analysis_sec", 0.);
graph_walker_.set_profiling_data("analysis_sec", 0.);
graph_walker_.set_profiling_data("num_full_updates", 0.);
}
protected:
virtual void update_timing_impl() override {
update_hold_timing();
}
virtual void update_hold_timing_impl() override {
auto start_time = Clock::now();
graph_walker_.do_reset(timing_graph_, hold_visitor_);
graph_walker_.do_arrival_pre_traversal(timing_graph_, timing_constraints_, hold_visitor_);
graph_walker_.do_arrival_traversal(timing_graph_, timing_constraints_, delay_calculator_, hold_visitor_);
graph_walker_.do_required_pre_traversal(timing_graph_, timing_constraints_, hold_visitor_);
graph_walker_.do_required_traversal(timing_graph_, timing_constraints_, delay_calculator_, hold_visitor_);
graph_walker_.do_update_slack(timing_graph_, delay_calculator_, hold_visitor_);
double analysis_sec = std::chrono::duration_cast<dsec>(Clock::now() - start_time).count();
//Record profiling data
double total_analysis_sec = analysis_sec + graph_walker_.get_profiling_data("total_analysis_sec");
graph_walker_.set_profiling_data("total_analysis_sec", total_analysis_sec);
graph_walker_.set_profiling_data("analysis_sec", analysis_sec);
graph_walker_.set_profiling_data("num_full_updates", graph_walker_.get_profiling_data("num_full_updates") + 1);
}
double get_profiling_data_impl(std::string key) const override { return graph_walker_.get_profiling_data(key); }
size_t num_unconstrained_startpoints_impl() const override { return graph_walker_.num_unconstrained_startpoints(); }
size_t num_unconstrained_endpoints_impl() const override { return graph_walker_.num_unconstrained_endpoints(); }
TimingTags::tag_range hold_tags_impl(NodeId node_id) const override { return hold_visitor_.hold_tags(node_id); }
TimingTags::tag_range hold_tags_impl(NodeId node_id, TagType type) const override { return hold_visitor_.hold_tags(node_id, type); }
TimingTags::tag_range hold_edge_slacks_impl(EdgeId edge_id) const override { return hold_visitor_.hold_edge_slacks(edge_id); }
TimingTags::tag_range hold_node_slacks_impl(NodeId node_id) const override { return hold_visitor_.hold_node_slacks(node_id); }
private:
const TimingGraph& timing_graph_;
const TimingConstraints& timing_constraints_;
const DelayCalculator& delay_calculator_;
HoldAnalysis hold_visitor_;
GraphWalker graph_walker_;
typedef std::chrono::duration<double> dsec;
typedef std::chrono::high_resolution_clock Clock;
};
}} //namespace

View File

@ -0,0 +1,110 @@
#pragma once
#include "tatum/graph_walkers/SerialWalker.hpp"
#include "tatum/SetupHoldAnalysis.hpp"
#include "tatum/analyzers/SetupHoldTimingAnalyzer.hpp"
#include "tatum/base/validate_timing_graph_constraints.hpp"
namespace tatum { namespace detail {
/**
* A concrete implementation of a SetupHoldTimingAnalyzer.
*
* This is a full (i.e. non-incremental) analyzer, which fully
* re-analyzes the timing graph whenever update_timing_impl() is
* called.
*/
template<class GraphWalker=SerialWalker>
class FullSetupHoldTimingAnalyzer : public SetupHoldTimingAnalyzer {
public:
FullSetupHoldTimingAnalyzer(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints, const DelayCalculator& delay_calculator)
: SetupHoldTimingAnalyzer()
, timing_graph_(timing_graph)
, timing_constraints_(timing_constraints)
, delay_calculator_(delay_calculator)
, setup_hold_visitor_(timing_graph_.nodes().size(), timing_graph_.edges().size()) {
validate_timing_graph_constraints(timing_graph_, timing_constraints_);
//Initialize profiling data
graph_walker_.set_profiling_data("total_analysis_sec", 0.);
graph_walker_.set_profiling_data("analysis_sec", 0.);
graph_walker_.set_profiling_data("num_full_updates", 0.);
}
protected:
//Update both setup and hold simultaneously (this is more efficient than updating them sequentially)
virtual void update_timing_impl() override {
auto start_time = Clock::now();
graph_walker_.do_reset(timing_graph_, setup_hold_visitor_);
graph_walker_.do_arrival_pre_traversal(timing_graph_, timing_constraints_, setup_hold_visitor_);
graph_walker_.do_arrival_traversal(timing_graph_, timing_constraints_, delay_calculator_, setup_hold_visitor_);
graph_walker_.do_required_pre_traversal(timing_graph_, timing_constraints_, setup_hold_visitor_);
graph_walker_.do_required_traversal(timing_graph_, timing_constraints_, delay_calculator_, setup_hold_visitor_);
graph_walker_.do_update_slack(timing_graph_, delay_calculator_, setup_hold_visitor_);
double analysis_sec = std::chrono::duration_cast<dsec>(Clock::now() - start_time).count();
//Record profiling data
double total_analysis_sec = analysis_sec + graph_walker_.get_profiling_data("total_analysis_sec");
graph_walker_.set_profiling_data("total_analysis_sec", total_analysis_sec);
graph_walker_.set_profiling_data("analysis_sec", analysis_sec);
graph_walker_.set_profiling_data("num_full_updates", graph_walker_.get_profiling_data("num_full_updates") + 1);
}
//Update only setup timing
virtual void update_setup_timing_impl() override {
auto& setup_visitor = setup_hold_visitor_.setup_visitor();
graph_walker_.do_reset(timing_graph_, setup_visitor);
graph_walker_.do_arrival_pre_traversal(timing_graph_, timing_constraints_, setup_visitor);
graph_walker_.do_arrival_traversal(timing_graph_, timing_constraints_, delay_calculator_, setup_visitor);
graph_walker_.do_required_pre_traversal(timing_graph_, timing_constraints_, setup_visitor);
graph_walker_.do_required_traversal(timing_graph_, timing_constraints_, delay_calculator_, setup_visitor);
graph_walker_.do_update_slack(timing_graph_, delay_calculator_, setup_visitor);
}
//Update only hold timing
virtual void update_hold_timing_impl() override {
auto& hold_visitor = setup_hold_visitor_.hold_visitor();
graph_walker_.do_reset(timing_graph_, hold_visitor);
graph_walker_.do_arrival_pre_traversal(timing_graph_, timing_constraints_, hold_visitor);
graph_walker_.do_arrival_traversal(timing_graph_, timing_constraints_, delay_calculator_, hold_visitor);
graph_walker_.do_required_pre_traversal(timing_graph_, timing_constraints_, hold_visitor);
graph_walker_.do_required_traversal(timing_graph_, timing_constraints_, delay_calculator_, hold_visitor);
graph_walker_.do_update_slack(timing_graph_, delay_calculator_, hold_visitor);
}
double get_profiling_data_impl(std::string key) const override { return graph_walker_.get_profiling_data(key); }
size_t num_unconstrained_startpoints_impl() const override { return graph_walker_.num_unconstrained_startpoints(); }
size_t num_unconstrained_endpoints_impl() const override { return graph_walker_.num_unconstrained_endpoints(); }
TimingTags::tag_range setup_tags_impl(NodeId node_id) const override { return setup_hold_visitor_.setup_tags(node_id); }
TimingTags::tag_range setup_tags_impl(NodeId node_id, TagType type) const override { return setup_hold_visitor_.setup_tags(node_id, type); }
TimingTags::tag_range setup_edge_slacks_impl(EdgeId edge_id) const override { return setup_hold_visitor_.setup_edge_slacks(edge_id); }
TimingTags::tag_range setup_node_slacks_impl(NodeId node_id) const override { return setup_hold_visitor_.setup_node_slacks(node_id); }
TimingTags::tag_range hold_tags_impl(NodeId node_id) const override { return setup_hold_visitor_.hold_tags(node_id); }
TimingTags::tag_range hold_tags_impl(NodeId node_id, TagType type) const override { return setup_hold_visitor_.hold_tags(node_id, type); }
TimingTags::tag_range hold_edge_slacks_impl(EdgeId edge_id) const override { return setup_hold_visitor_.hold_edge_slacks(edge_id); }
TimingTags::tag_range hold_node_slacks_impl(NodeId node_id) const override { return setup_hold_visitor_.hold_node_slacks(node_id); }
private:
const TimingGraph& timing_graph_;
const TimingConstraints& timing_constraints_;
const DelayCalculator& delay_calculator_;
SetupHoldAnalysis setup_hold_visitor_;
GraphWalker graph_walker_;
typedef std::chrono::duration<double> dsec;
typedef std::chrono::high_resolution_clock Clock;
};
}} //namespace

View File

@ -0,0 +1,84 @@
#pragma once
#include "tatum/graph_walkers/SerialWalker.hpp"
#include "tatum/SetupAnalysis.hpp"
#include "tatum/analyzers/SetupTimingAnalyzer.hpp"
#include "tatum/base/validate_timing_graph_constraints.hpp"
namespace tatum { namespace detail {
/**
* A concrete implementation of a SetupTimingAnalyzer.
*
* This is a full (i.e. non-incremental) analyzer, which fully
* re-analyzes the timing graph whenever update_timing_impl() is
* called.
*/
template<class GraphWalker=SerialWalker>
class FullSetupTimingAnalyzer : public SetupTimingAnalyzer {
public:
FullSetupTimingAnalyzer(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints, const DelayCalculator& delay_calculator)
: SetupTimingAnalyzer()
, timing_graph_(timing_graph)
, timing_constraints_(timing_constraints)
, delay_calculator_(delay_calculator)
, setup_visitor_(timing_graph_.nodes().size(), timing_graph_.edges().size()) {
validate_timing_graph_constraints(timing_graph_, timing_constraints_);
//Initialize profiling data
graph_walker_.set_profiling_data("total_analysis_sec", 0.);
graph_walker_.set_profiling_data("analysis_sec", 0.);
graph_walker_.set_profiling_data("num_full_updates", 0.);
}
protected:
virtual void update_timing_impl() override {
update_setup_timing();
}
virtual void update_setup_timing_impl() override {
auto start_time = Clock::now();
graph_walker_.do_reset(timing_graph_, setup_visitor_);
graph_walker_.do_arrival_pre_traversal(timing_graph_, timing_constraints_, setup_visitor_);
graph_walker_.do_arrival_traversal(timing_graph_, timing_constraints_, delay_calculator_, setup_visitor_);
graph_walker_.do_required_pre_traversal(timing_graph_, timing_constraints_, setup_visitor_);
graph_walker_.do_required_traversal(timing_graph_, timing_constraints_, delay_calculator_, setup_visitor_);
graph_walker_.do_update_slack(timing_graph_, delay_calculator_, setup_visitor_);
double analysis_sec = std::chrono::duration_cast<dsec>(Clock::now() - start_time).count();
//Record profiling data
double total_analysis_sec = analysis_sec + graph_walker_.get_profiling_data("total_analysis_sec");
graph_walker_.set_profiling_data("total_analysis_sec", total_analysis_sec);
graph_walker_.set_profiling_data("analysis_sec", analysis_sec);
graph_walker_.set_profiling_data("num_full_updates", graph_walker_.get_profiling_data("num_full_updates") + 1);
}
//TimingAnalyzer
double get_profiling_data_impl(std::string key) const override { return graph_walker_.get_profiling_data(key); }
size_t num_unconstrained_startpoints_impl() const override { return graph_walker_.num_unconstrained_startpoints(); }
size_t num_unconstrained_endpoints_impl() const override { return graph_walker_.num_unconstrained_endpoints(); }
//SetupTimingAnalyzer
TimingTags::tag_range setup_tags_impl(NodeId node_id) const override { return setup_visitor_.setup_tags(node_id); }
TimingTags::tag_range setup_tags_impl(NodeId node_id, TagType type) const override { return setup_visitor_.setup_tags(node_id, type); }
TimingTags::tag_range setup_edge_slacks_impl(EdgeId edge_id) const override { return setup_visitor_.setup_edge_slacks(edge_id); }
TimingTags::tag_range setup_node_slacks_impl(NodeId node_id) const override { return setup_visitor_.setup_node_slacks(node_id); }
private:
const TimingGraph& timing_graph_;
const TimingConstraints& timing_constraints_;
const DelayCalculator& delay_calculator_;
SetupAnalysis setup_visitor_;
GraphWalker graph_walker_;
typedef std::chrono::duration<double> dsec;
typedef std::chrono::high_resolution_clock Clock;
};
}} //namespace

View File

@ -0,0 +1,33 @@
#pragma once
#include "tatum/analyzers/TimingAnalyzer.hpp"
#include "tatum/tags/TimingTags.hpp"
#include "tatum/TimingGraphFwd.hpp"
namespace tatum {
/**
* HoldTimingAnalyzer represents an abstract interface for all timing analyzers
* performing hold (i.e. short-path) analysis.
*
* Note the use of virtual inheritance to avoid duplicating the base class
*/
class HoldTimingAnalyzer : public virtual TimingAnalyzer {
public:
///Update only the hold timing related arrival/required tags
void update_hold_timing() { update_hold_timing_impl(); }
TimingTags::tag_range hold_tags(NodeId node_id) const { return hold_tags_impl(node_id); }
TimingTags::tag_range hold_tags(NodeId node_id, TagType type) const { return hold_tags_impl(node_id, type); }
TimingTags::tag_range hold_slacks(EdgeId edge_id) const { return hold_edge_slacks_impl(edge_id); }
TimingTags::tag_range hold_slacks(NodeId node_id) const { return hold_node_slacks_impl(node_id); }
protected:
virtual void update_hold_timing_impl() = 0;
virtual TimingTags::tag_range hold_tags_impl(NodeId node_id) const = 0;
virtual TimingTags::tag_range hold_tags_impl(NodeId node_id, TagType type) const = 0;
virtual TimingTags::tag_range hold_edge_slacks_impl(EdgeId edge_id) const = 0;
virtual TimingTags::tag_range hold_node_slacks_impl(NodeId node_id) const = 0;
};
} //namespace

View File

@ -0,0 +1,25 @@
#pragma once
#include "SetupTimingAnalyzer.hpp"
#include "HoldTimingAnalyzer.hpp"
namespace tatum {
/**
* SetupHoldTimingAnalyzer represents an abstract interface for all timing analyzers
* performing combined setup and hold (i.e. simultaneous long and short-path) analysis.
*
* A combined analysis tends to be more efficient than performing two separate analyses
* (provided both setup and hold data are truly required).
*
* It implements both the SetupTimingAnalyzer and HoldTimingAnalyzer interfaces.
*/
class SetupHoldTimingAnalyzer : public SetupTimingAnalyzer, public HoldTimingAnalyzer {
//Empty (all behaviour inherited)
//
// Note that SetupTimingAnalyzer and HoldTimingAnalyzer use virtual inheritance, so
// there is no ambiguity when inheriting from both (there will be only one base class
// instance).
};
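//Illustrative sketch (hypothetical caller, not library code): since both parent
//interfaces inherit virtually from TimingAnalyzer there is a single shared base,
//so an object of this type can be used wherever either interface is expected.
//
//  void report(tatum::SetupHoldTimingAnalyzer& analyzer, tatum::NodeId node) {
//      analyzer.update_timing();                    //Unambiguous: one TimingAnalyzer base
//      auto setup_tags = analyzer.setup_tags(node); //From the SetupTimingAnalyzer interface
//      auto hold_tags  = analyzer.hold_tags(node);  //From the HoldTimingAnalyzer interface
//  }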
} //namespace

View File

@ -0,0 +1,33 @@
#pragma once
#include "tatum/analyzers/TimingAnalyzer.hpp"
#include "tatum/tags/TimingTags.hpp"
#include "tatum/TimingGraphFwd.hpp"
namespace tatum {
/**
* SetupTimingAnalyzer represents an abstract interface for all timing analyzers
* performing setup (i.e. long-path) analysis.
*
* Note the use of virtual inheritance to avoid duplicating the base class
*/
class SetupTimingAnalyzer : public virtual TimingAnalyzer {
public:
///Update only the setup timing related arrival/required tags
void update_setup_timing() { update_setup_timing_impl(); }
TimingTags::tag_range setup_tags(NodeId node_id) const { return setup_tags_impl(node_id); }
TimingTags::tag_range setup_tags(NodeId node_id, TagType type) const { return setup_tags_impl(node_id, type); }
TimingTags::tag_range setup_slacks(EdgeId edge_id) const { return setup_edge_slacks_impl(edge_id); }
TimingTags::tag_range setup_slacks(NodeId node_id) const { return setup_node_slacks_impl(node_id); }
protected:
virtual void update_setup_timing_impl() = 0;
virtual TimingTags::tag_range setup_tags_impl(NodeId node_id) const = 0;
virtual TimingTags::tag_range setup_tags_impl(NodeId node_id, TagType type) const = 0;
virtual TimingTags::tag_range setup_edge_slacks_impl(EdgeId edge_id) const = 0;
virtual TimingTags::tag_range setup_node_slacks_impl(NodeId node_id) const = 0;
};
} //namespace

View File

@ -0,0 +1,45 @@
#pragma once
#include <string>
namespace tatum {
/**
* TimingAnalyzer represents an abstract interface for all timing analyzers,
* which can be:
* - updated (update_timing())
* - reset (reset_timing()).
*
* This is the most abstract interface provided (it does not allow access
* to any calculated data). As a result this interface is suitable for
* code that needs to update timing analysis, but does not *use* the
* analysis results itself.
*
* If you need the analysis results you should be using one of the derived
* classes.
*
* \see SetupTimingAnalyzer
* \see HoldTimingAnalyzer
* \see SetupHoldTimingAnalyzer
*/
class TimingAnalyzer {
public:
virtual ~TimingAnalyzer() {}
///Perform timing analysis to update timing information (i.e. arrival & required times)
void update_timing() { update_timing_impl(); }
double get_profiling_data(std::string key) const { return get_profiling_data_impl(key); }
virtual size_t num_unconstrained_startpoints() const { return num_unconstrained_startpoints_impl(); }
virtual size_t num_unconstrained_endpoints() const { return num_unconstrained_endpoints_impl(); }
protected:
virtual void update_timing_impl() = 0;
virtual double get_profiling_data_impl(std::string key) const = 0;
virtual size_t num_unconstrained_startpoints_impl() const = 0;
virtual size_t num_unconstrained_endpoints_impl() const = 0;
};
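//A hedged usage sketch (illustrative only, not part of the library): code which
//only needs to trigger an analysis update can depend on this abstract interface.
//The function name below is hypothetical.
//
//  void refresh_timing(tatum::TimingAnalyzer& analyzer) {
//      analyzer.update_timing(); //Re-runs the analysis (arrival & required times)
//      double analysis_sec = analyzer.get_profiling_data("analysis_sec"); //Profiling key recorded by the concrete analyzers
//  }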
} //namespace

View File

@ -0,0 +1,14 @@
#pragma once
/** \file
* This file defines concrete implementations of the TimingAnalyzer interfaces.
*
* In particular these concrete analyzers are 'full' (i.e. non-incremental) timing analyzers:
* every call to update_timing_impl() fully re-analyzes the timing graph.
*
* Note that at this time only 'full' analyzers are supported.
*/
#include "FullSetupTimingAnalyzer.hpp"
#include "FullHoldTimingAnalyzer.hpp"
#include "FullSetupHoldTimingAnalyzer.hpp"

View File

@ -0,0 +1,14 @@
#ifndef TATUM_ARRIVAL_TYPE_HPP
#define TATUM_ARRIVAL_TYPE_HPP
namespace tatum {
//The type of arrival time (early or late)
enum class ArrivalType {
EARLY,
LATE
};
} //namespace
#endif

View File

@ -0,0 +1,14 @@
#ifndef TATUM_DELAY_TYPE_HPP
#define TATUM_DELAY_TYPE_HPP
namespace tatum {
//The type of delay (minimum or maximum)
enum class DelayType {
MAX,
MIN
};
} //namespace
#endif

View File

@ -0,0 +1,15 @@
#ifndef TATUM_TIMING_TYPE_HPP
#define TATUM_TIMING_TYPE_HPP
namespace tatum {
//The type of timing analysis
enum class TimingType {
SETUP,
HOLD,
UNKOWN
};
} //namespace
#endif

View File

@ -0,0 +1,96 @@
#include <stack>
#include <algorithm>
#include "tatum/TimingGraph.hpp"
#include "tatum/base/loop_detect.hpp"
#include "tatum/util/tatum_linear_map.hpp"
namespace tatum {
//Internal data used in identify_strongly_connected_components() and strongconnect()
struct NodeSccInfo {
bool on_stack = false;
int index = -1;
int low_link = -1;
};
void strongconnect(const tatum::TimingGraph& tg,
tatum::NodeId node,
int& cur_index,
std::stack<tatum::NodeId>& stack,
tatum::util::linear_map<tatum::NodeId,NodeSccInfo>& node_info,
std::vector<std::vector<tatum::NodeId>>& sccs,
size_t min_size);
//Returns sets of nodes (i.e. strongly connected components) whose size meets or exceeds the specified min_size
std::vector<std::vector<NodeId>> identify_strongly_connected_components(const TimingGraph& tg, size_t min_size) {
//This uses Tarjan's algorithm which identifies Strongly Connected Components (SCCs) in O(|V| + |E|) time
int curr_index = 0;
std::stack<NodeId> stack;
tatum::util::linear_map<NodeId,NodeSccInfo> node_info(tg.nodes().size());
std::vector<std::vector<NodeId>> sccs;
for(NodeId node : tg.nodes()) {
if(node_info[node].index == -1) {
strongconnect(tg, node, curr_index, stack, node_info, sccs, min_size);
}
}
return sccs;
}
void strongconnect(const TimingGraph& tg,
NodeId node,
int& cur_index,
std::stack<NodeId>& stack,
tatum::util::linear_map<NodeId,NodeSccInfo>& node_info,
std::vector<std::vector<NodeId>>& sccs,
size_t min_size) {
node_info[node].index = cur_index;
node_info[node].low_link = cur_index;
++cur_index;
stack.push(node);
node_info[node].on_stack = true;
for(EdgeId edge : tg.node_out_edges(node)) {
if(tg.edge_disabled(edge)) continue;
NodeId sink_node = tg.edge_sink_node(edge);
if(node_info[sink_node].index == -1) {
//Have not visited sink_node yet
strongconnect(tg, sink_node, cur_index, stack, node_info, sccs, min_size);
node_info[node].low_link = std::min(node_info[node].low_link, node_info[sink_node].low_link);
} else if(node_info[sink_node].on_stack) {
//sink_node is part of the SCC
node_info[node].low_link = std::min(node_info[node].low_link, node_info[sink_node].low_link);
}
}
if(node_info[node].low_link == node_info[node].index) {
//node is the root of a SCC
std::vector<NodeId> scc;
NodeId scc_node;
do {
scc_node = stack.top();
stack.pop();
node_info[scc_node].on_stack = false;
scc.push_back(scc_node);
} while(scc_node != node);
if(scc.size() >= min_size) {
sccs.push_back(scc);
}
}
}
}

View File

@ -0,0 +1,17 @@
#ifndef TATUM_LOOP_DETECT
#define TATUM_LOOP_DETECT
#include <set>
#include <vector>
#include "tatum/TimingGraphFwd.hpp"
namespace tatum {
//Returns the set of Strongly Connected Components with
//size >= min_size found in the timing graph
std::vector<std::vector<NodeId>> identify_strongly_connected_components(const TimingGraph& tg, size_t min_size);
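//Hedged usage sketch: combinational loops typically appear as SCCs containing two or
//more nodes, so a caller commonly passes min_size = 2 and then reports or disables the
//offending nodes/edges ('tg' is assumed to be an already-built TimingGraph).
//
//  auto sccs = tatum::identify_strongly_connected_components(tg, 2);
//  for(const auto& scc : sccs) {
//      //Each 'scc' is a std::vector<NodeId> forming a cycle in the timing graph
//  }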
}
#endif

View File

@ -0,0 +1,331 @@
#include <ctime>
#include <cmath>
#include <iostream>
#include <iomanip>
#include <algorithm>
#include <map>
#include <string>
#include <sstream>
#include <fstream>
#include <numeric>
#include "tatum/util/tatum_assert.hpp"
#include "tatum/util/OsFormatGuard.hpp"
#include "tatum/base/sta_util.hpp"
using std::cout;
using std::endl;
namespace tatum {
float time_sec(struct timespec start, struct timespec end) {
float time = end.tv_sec - start.tv_sec;
time += (end.tv_nsec - start.tv_nsec) * 1e-9;
return time;
}
void print_histogram(const std::vector<float>& values, int nbuckets) {
nbuckets = std::min(values.size(), (size_t) nbuckets);
int values_per_bucket = ceil((float) values.size() / nbuckets);
std::vector<float> buckets(nbuckets);
//Sum up each bucket
for(size_t i = 0; i < values.size(); i++) {
int ibucket = i / values_per_bucket;
buckets[ibucket] += values[i];
}
//Normalize to get average value
for(int i = 0; i < nbuckets; i++) {
buckets[i] /= values_per_bucket;
}
float max_bucket_val = *std::max_element(buckets.begin(), buckets.end());
//Print the histogram
std::ios_base::fmtflags saved_flags = cout.flags();
std::streamsize prec = cout.precision();
std::streamsize width = cout.width();
std::streamsize int_width = ceil(log10(values.size()));
std::streamsize float_prec = 1;
int histo_char_width = 60;
//cout << "\t" << endl;
for(int i = 0; i < nbuckets; i++) {
cout << std::setw(int_width) << i*values_per_bucket << ":" << std::setw(int_width) << (i+1)*values_per_bucket - 1;
cout << " " << std::scientific << std::setprecision(float_prec) << buckets[i];
cout << " ";
for(int j = 0; j < histo_char_width*(buckets[i]/max_bucket_val); j++) {
cout << "*";
}
cout << endl;
}
cout.flags(saved_flags);
cout.precision(prec);
cout.width(width);
}
void print_level_histogram(const TimingGraph& tg, int nbuckets) {
cout << "Levels Width Histogram" << endl;
std::vector<float> level_widths;
for(const LevelId level_id : tg.levels()) {
level_widths.push_back(tg.level_nodes(level_id).size());
}
print_histogram(level_widths, nbuckets);
}
void print_node_fanin_histogram(const TimingGraph& tg, int nbuckets) {
cout << "Node Fan-in Histogram" << endl;
std::vector<float> fanin;
for(const NodeId node_id : tg.nodes()) {
fanin.push_back(tg.node_in_edges(node_id).size());
}
std::sort(fanin.begin(), fanin.end(), std::greater<float>());
print_histogram(fanin, nbuckets);
}
void print_node_fanout_histogram(const TimingGraph& tg, int nbuckets) {
cout << "Node Fan-out Histogram" << endl;
std::vector<float> fanout;
for(const NodeId node_id : tg.nodes()) {
fanout.push_back(tg.node_out_edges(node_id).size());
}
std::sort(fanout.begin(), fanout.end(), std::greater<float>());
print_histogram(fanout, nbuckets);
}
void print_timing_graph(std::shared_ptr<const TimingGraph> tg) {
for(const NodeId node_id : tg->nodes()) {
cout << "Node: " << node_id;
cout << " Type: " << tg->node_type(node_id);
cout << " Out Edges: " << tg->node_out_edges(node_id).size();
cout << "\n";
for(EdgeId edge_id : tg->node_out_edges(node_id)) {
TATUM_ASSERT(tg->edge_src_node(edge_id) == node_id);
NodeId sink_node_id = tg->edge_sink_node(edge_id);
cout << "\tEdge src node: " << node_id << " sink node: " << sink_node_id << "\n";
}
}
}
void print_levelization(std::shared_ptr<const TimingGraph> tg) {
cout << "Num Levels: " << tg->levels().size() << "\n";
for(const LevelId level_id : tg->levels()) {
const auto& level = tg->level_nodes(level_id);
cout << "Level " << level_id << ": " << level.size() << " nodes" << "\n";
cout << "\t";
for(auto node_id : level) {
cout << node_id << " ";
}
cout << "\n";
}
}
void print_setup_tags_histogram(const TimingGraph& tg, const SetupTimingAnalyzer& analyzer) {
OsFormatGuard format_guard(std::cout);
const int int_width = 8;
const int flt_width = 2;
auto totaler = [](int total, const std::map<int,int>::value_type& kv) {
return total + kv.second;
};
std::cout << "Node Total Setup Tag Count Histogram:" << std::endl;
std::map<int,int> total_tag_cnts;
for(const NodeId i : tg.nodes()) {
total_tag_cnts[analyzer.setup_tags(i).size()]++;
}
int total_tags = std::accumulate(total_tag_cnts.begin(), total_tag_cnts.end(), 0, totaler);
for(const auto& kv : total_tag_cnts) {
std::cout << "\t" << kv.first << " Tags: " << std::setw(int_width) << kv.second << " (" << std::setw(flt_width) << std::fixed << (float) kv.second / total_tags << ")" << std::endl;
}
std::cout << "Node Data Arrival Setup Tag Count Histogram:" << std::endl;
std::map<int,int> data_arrival_tag_cnts;
for(const NodeId i : tg.nodes()) {
data_arrival_tag_cnts[analyzer.setup_tags(i, TagType::DATA_ARRIVAL).size()]++;
}
int total_data_arrival_tags = std::accumulate(data_arrival_tag_cnts.begin(), data_arrival_tag_cnts.end(), 0, totaler);
for(const auto& kv : data_arrival_tag_cnts) {
std::cout << "\t" << kv.first << " Tags: " << std::setw(int_width) << kv.second << " (" << std::setw(flt_width) << std::fixed << (float) kv.second / total_data_arrival_tags << ")" << std::endl;
}
std::cout << "Node Data Required Setup Tag Count Histogram:" << std::endl;
std::map<int,int> data_required_tag_cnts;
for(const NodeId i : tg.nodes()) {
data_required_tag_cnts[analyzer.setup_tags(i, TagType::DATA_REQUIRED).size()]++;
}
int total_data_tags = std::accumulate(data_required_tag_cnts.begin(), data_required_tag_cnts.end(), 0, totaler);
for(const auto& kv : data_required_tag_cnts) {
std::cout << "\t" << kv.first << " Tags: " << std::setw(int_width) << kv.second << " (" << std::setw(flt_width) << std::fixed << (float) kv.second / total_data_tags << ")" << std::endl;
}
std::cout << "Node Clock Launch Setup Tag Count Histogram:" << std::endl;
std::map<int,int> clock_launch_tag_cnts;
for(const NodeId i : tg.nodes()) {
clock_launch_tag_cnts[analyzer.setup_tags(i, TagType::CLOCK_LAUNCH).size()]++;
}
int total_clock_launch_tags = std::accumulate(clock_launch_tag_cnts.begin(), clock_launch_tag_cnts.end(), 0, totaler);
for(const auto& kv : clock_launch_tag_cnts) {
std::cout << "\t" << kv.first << " Tags: " << std::setw(int_width) << kv.second << " (" << std::setw(flt_width) << std::fixed << (float) kv.second / total_clock_launch_tags << ")" << std::endl;
}
std::cout << "Node Clock Capture Setup Tag Count Histogram:" << std::endl;
std::map<int,int> clock_capture_tag_cnts;
for(const NodeId i : tg.nodes()) {
clock_capture_tag_cnts[analyzer.setup_tags(i, TagType::CLOCK_CAPTURE).size()]++;
}
int total_clock_capture_tags = std::accumulate(clock_capture_tag_cnts.begin(), clock_capture_tag_cnts.end(), 0, totaler);
for(const auto& kv : clock_capture_tag_cnts) {
std::cout << "\t" << kv.first << " Tags: " << std::setw(int_width) << kv.second << " (" << std::setw(flt_width) << std::fixed << (float) kv.second / total_clock_capture_tags << ")" << std::endl;
}
}
void print_hold_tags_histogram(const TimingGraph& tg, const HoldTimingAnalyzer& analyzer) {
OsFormatGuard format_guard(std::cout);
const int int_width = 8;
const int flt_width = 2;
auto totaler = [](int total, const std::map<int,int>::value_type& kv) {
return total + kv.second;
};
std::cout << "Node Total Hold Tag Count Histogram:" << std::endl;
std::map<int,int> total_tag_cnts;
for(const NodeId i : tg.nodes()) {
total_tag_cnts[analyzer.hold_tags(i).size()]++;
}
int total_tags = std::accumulate(total_tag_cnts.begin(), total_tag_cnts.end(), 0, totaler);
for(const auto& kv : total_tag_cnts) {
std::cout << "\t" << kv.first << " Tags: " << std::setw(int_width) << kv.second << " (" << std::setw(flt_width) << std::fixed << (float) kv.second / total_tags << ")" << std::endl;
}
std::cout << "Node Data Hold Tag Count Histogram:" << std::endl;
std::map<int,int> data_tag_cnts;
for(const NodeId i : tg.nodes()) {
data_tag_cnts[analyzer.hold_tags(i).size()]++;
}
int total_data_tags = std::accumulate(data_tag_cnts.begin(), data_tag_cnts.end(), 0, totaler);
for(const auto& kv : data_tag_cnts) {
std::cout << "\t" << kv.first << " Tags: " << std::setw(int_width) << kv.second << " (" << std::setw(flt_width) << std::fixed << (float) kv.second / total_data_tags << ")" << std::endl;
}
std::cout << "Node Clock Launch Setup Tag Count Histogram:" << std::endl;
std::map<int,int> clock_launch_tag_cnts;
for(const NodeId i : tg.nodes()) {
clock_launch_tag_cnts[analyzer.hold_tags(i, TagType::CLOCK_LAUNCH).size()]++;
}
int total_clock_launch_tags = std::accumulate(clock_launch_tag_cnts.begin(), clock_launch_tag_cnts.end(), 0, totaler);
for(const auto& kv : clock_launch_tag_cnts) {
std::cout << "\t" << kv.first << " Tags: " << std::setw(int_width) << kv.second << " (" << std::setw(flt_width) << std::fixed << (float) kv.second / total_clock_launch_tags << ")" << std::endl;
}
std::cout << "Node Clock Capture Setup Tag Count Histogram:" << std::endl;
std::map<int,int> clock_capture_tag_cnts;
for(const NodeId i : tg.nodes()) {
clock_capture_tag_cnts[analyzer.hold_tags(i, TagType::CLOCK_CAPTURE).size()]++;
}
int total_clock_capture_tags = std::accumulate(clock_capture_tag_cnts.begin(), clock_capture_tag_cnts.end(), 0, totaler);
for(const auto& kv : clock_capture_tag_cnts) {
std::cout << "\t" << kv.first << " Tags: " << std::setw(int_width) << kv.second << " (" << std::setw(flt_width) << std::fixed << (float) kv.second / total_clock_capture_tags << ")" << std::endl;
}
}
void print_setup_tags(const TimingGraph& tg, const SetupTimingAnalyzer& analyzer) {
OsFormatGuard flag_guard(std::cout);
std::cout << std::endl;
std::cout << "Setup Tags:" << std::endl;
std::cout << std::scientific;
for(const LevelId level_id : tg.levels()) {
std::cout << "Level: " << level_id << std::endl;
for(NodeId node_id : tg.level_nodes(level_id)) {
std::cout << "Node: " << node_id << " (" << tg.node_type(node_id) << ")" << std::endl;;
for(const TimingTag& tag : analyzer.setup_tags(node_id)) {
std::cout << "\t" << tag.type() << ": ";
std::cout << " launch : " << tag.launch_clock_domain();
std::cout << " capture : " << tag.capture_clock_domain();
std::cout << " time: " << tag.time().value();
std::cout << std::endl;
}
}
}
std::cout << std::endl;
}
void print_hold_tags(const TimingGraph& tg, const HoldTimingAnalyzer& analyzer) {
OsFormatGuard flag_guard(std::cout);
std::cout << std::endl;
std::cout << "Hold Tags:" << std::endl;
std::cout << std::scientific;
for(const LevelId level_id : tg.levels()) {
std::cout << "Level: " << level_id << std::endl;
for(NodeId node_id : tg.level_nodes(level_id)) {
std::cout << "Node: " << node_id << " (" << tg.node_type(node_id) << ")" << std::endl;;
for(const TimingTag& tag : analyzer.hold_tags(node_id)) {
std::cout << "\t" << tag.type() << ": ";
std::cout << " launch : " << tag.launch_clock_domain();
std::cout << " capture : " << tag.capture_clock_domain();
std::cout << " time: " << tag.time().value();
std::cout << std::endl;
}
}
}
std::cout << std::endl;
}
void dump_level_times(std::string fname, const TimingGraph& timing_graph, std::map<std::string,float> serial_prof_data, std::map<std::string,float> parallel_prof_data) {
//Per-level speed-up
//cout << "Level Speed-Ups by width:" << endl;
std::map<int,std::vector<LevelId>,std::greater<int>> widths_to_levels;
for(const LevelId level_id : timing_graph.levels()) {
int width = timing_graph.level_nodes(level_id).size();
widths_to_levels[width].push_back(level_id);
}
std::ofstream of(fname);
of << "Width,Level,serial_fwd,serial_bck,parallel_fwd,parallel_bck"<< endl;
for(auto kv : widths_to_levels) {
int width = kv.first;
for(const auto ilevel : kv.second) {
std::stringstream key_fwd;
std::stringstream key_bck;
key_fwd << "fwd_level_" << ilevel;
key_bck << "bck_level_" << ilevel;
of << width << "," << ilevel << ",";
of << serial_prof_data[key_fwd.str()] << "," << serial_prof_data[key_bck.str()] << ",";
of << parallel_prof_data[key_fwd.str()] << "," << parallel_prof_data[key_bck.str()];
of << endl;
}
}
}
} //namespace

View File

@ -0,0 +1,40 @@
#pragma once
#include <set>
#include <memory>
#include <iomanip>
#include <iostream>
#include <fstream>
#include <vector>
#include <limits>
#include "tatum/timing_analyzers.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
#include "tatum/tags/TimingTags.hpp"
#include "tatum/delay_calc/FixedDelayCalculator.hpp"
namespace tatum {
float time_sec(struct timespec start, struct timespec end);
void print_histogram(const std::vector<float>& values, int nbuckets);
void print_level_histogram(const TimingGraph& tg, int nbuckets);
void print_node_fanin_histogram(const TimingGraph& tg, int nbuckets);
void print_node_fanout_histogram(const TimingGraph& tg, int nbuckets);
void print_timing_graph(std::shared_ptr<const TimingGraph> tg);
void print_levelization(std::shared_ptr<const TimingGraph> tg);
void dump_level_times(std::string fname, const TimingGraph& timing_graph, std::map<std::string,float> serial_prof_data, std::map<std::string,float> parallel_prof_data);
void print_setup_tags_histogram(const TimingGraph& tg, const SetupTimingAnalyzer& analyzer);
void print_hold_tags_histogram(const TimingGraph& tg, const HoldTimingAnalyzer& analyzer);
void print_setup_tags(const TimingGraph& tg, const SetupTimingAnalyzer& analyzer);
void print_hold_tags(const TimingGraph& tg, const HoldTimingAnalyzer& analyzer);
} //namespace

View File

@ -0,0 +1,44 @@
#include "tatum/base/validate_timing_graph_constraints.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
#include "tatum/error.hpp"
namespace tatum {
bool validate_timing_graph_constraints(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints) {
//Check that all clocks are defined as source nodes
for(DomainId domain : timing_constraints.clock_domains()) {
NodeId source_node = timing_constraints.clock_domain_source_node(domain);
if(source_node) { //Virtual (i.e. IO) clocks may not have sources
if(timing_graph.node_type(source_node) != NodeType::SOURCE) {
std::string msg;
msg = "Clock Domain " + std::to_string(size_t(domain)) + " (" + timing_constraints.clock_domain_name(domain) + ")"
" source node " + std::to_string(size_t(source_node)) + " is not a node of type SOURCE.";
throw tatum::Error(msg, source_node);
}
}
}
//Check that any OPIN nodes with no incoming edges are constant generators
for(NodeId node : timing_graph.nodes()) {
if(timing_graph.node_type(node) == NodeType::OPIN && timing_graph.node_in_edges(node).size() == 0) {
if(!timing_constraints.node_is_constant_generator(node)) {
std::string msg;
msg = "Timing Graph node " + std::to_string(size_t(node)) + " is an OPIN with no incoming edges, but is not marked as a constant generator";
throw tatum::Error(msg, node);
}
}
}
//Nothing here for now
return true;
}
} //namespace

View File

@ -0,0 +1,14 @@
#ifndef TATUM_VALIDATE_GRAPH_COSNTRAINTS
#define TATUM_VALIDATE_GRAPH_COSNTRAINTS
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/TimingConstraintsFwd.hpp"
namespace tatum {
///\returns true if the constraints are valid for the given timing graph, throws an exception if not
bool validate_timing_graph_constraints(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints);
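//Hedged usage sketch: validation is typically run once after the timing graph and
//constraints have been built, so malformed inputs fail early with a descriptive error.
//
//  try {
//      tatum::validate_timing_graph_constraints(tg, tc);
//  } catch(const tatum::Error& e) {
//      //e.what() describes the problem; e.node/e.edge identify the offender (may be invalid)
//  }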
}
#endif

View File

@ -0,0 +1,22 @@
#pragma once
#include "tatum/Time.hpp"
#include "tatum/TimingGraphFwd.hpp"
namespace tatum {
/**
* Interface for a delay calculator
*/
class DelayCalculator {
public:
virtual ~DelayCalculator() {}
virtual Time min_edge_delay(const TimingGraph& tg, EdgeId edge_id) const = 0;
virtual Time max_edge_delay(const TimingGraph& tg, EdgeId edge_id) const = 0;
virtual Time setup_time(const TimingGraph& tg, EdgeId edge_id) const = 0;
virtual Time hold_time(const TimingGraph& tg, EdgeId edge_id) const = 0;
};
} //namespace

View File

@ -0,0 +1,61 @@
#pragma once
#include "tatum/util/tatum_linear_map.hpp"
#include "tatum/Time.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
namespace tatum {
/**
* An example DelayCalculator implementation which takes
* a vector of fixed pre-calculated edge delays
*
* \see DelayCalculator
*/
class FixedDelayCalculator : public DelayCalculator {
public:
///Initializes the edge delays
///\param max_edge_delays A container specifying the maximum delay for every combinational edge
///\param setup_times A container specifying the setup time for every sequential capture edge
FixedDelayCalculator(const tatum::util::linear_map<EdgeId,Time>& max_edge_delays,
const tatum::util::linear_map<EdgeId,Time>& setup_times)
: max_edge_delays_(max_edge_delays)
, setup_times_(setup_times)
, min_edge_delays_(max_edge_delays)
, hold_times_(setup_times) { }
///Initializes the edge delays
///\param max_edge_delays A container specifying the maximum delay for every combinational edge
///\param setup_times A container specifying the setup time for every sequential capture edge
///\param min_edge_delays A container specifying the minimum delay for every combinational edge
///\param hold_times A container specifying the hold time for every sequential capture edge
FixedDelayCalculator(const tatum::util::linear_map<EdgeId,Time>& max_edge_delays,
const tatum::util::linear_map<EdgeId,Time>& setup_times,
const tatum::util::linear_map<EdgeId,Time>& min_edge_delays,
const tatum::util::linear_map<EdgeId,Time>& hold_times)
: max_edge_delays_(max_edge_delays)
, setup_times_(setup_times)
, min_edge_delays_(min_edge_delays)
, hold_times_(hold_times) { }
Time max_edge_delay(const TimingGraph& /*tg*/, EdgeId edge_id) const override { return max_edge_delays_[edge_id]; }
Time min_edge_delay(const TimingGraph& /*tg*/, EdgeId edge_id) const override { return min_edge_delays_[edge_id]; }
Time setup_time(const TimingGraph& /*tg*/, EdgeId edge_id) const override { return setup_times_[edge_id]; }
Time hold_time(const TimingGraph& /*tg*/, EdgeId edge_id) const override { return hold_times_[edge_id]; }
private:
tatum::util::linear_map<EdgeId,Time> max_edge_delays_;
tatum::util::linear_map<EdgeId,Time> setup_times_;
tatum::util::linear_map<EdgeId,Time> min_edge_delays_;
tatum::util::linear_map<EdgeId,Time> hold_times_;
};
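//A hedged construction sketch (variable names are illustrative): the linear maps
//are indexed by EdgeId, so they are typically sized to tg.edges().size() and
//filled per edge before being handed to the calculator.
//
//  tatum::util::linear_map<tatum::EdgeId,tatum::Time> max_delays(tg.edges().size());
//  tatum::util::linear_map<tatum::EdgeId,tatum::Time> setup_times(tg.edges().size());
//  //... fill max_delays[edge] and setup_times[edge] for each EdgeId ...
//  tatum::FixedDelayCalculator delay_calc(max_delays, setup_times); //min delays/hold times default to the max/setup values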
} //namespace

View File

@ -0,0 +1,371 @@
#include <fstream>
#include <cmath>
#include <vector>
#include <algorithm>
#include "echo_writer.hpp"
#include "tatum/util/tatum_assert.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
#include "tatum/tags/TimingTags.hpp"
#include "tatum/timing_analyzers.hpp"
namespace tatum {
void write_tags(std::ostream& os, const std::string& type, const TimingTags::tag_range tags, const NodeId node_id);
void write_slacks(std::ostream& os, const std::string& type, const TimingTags::tag_range tags, const EdgeId edge);
void write_slacks(std::ostream& os, const std::string& type, const TimingTags::tag_range tags, const NodeId node);
void write_echo(std::string filename, const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const std::shared_ptr<const TimingAnalyzer> analyzer) {
std::ofstream os(filename);
write_echo(os, tg, tc, dc, analyzer);
}
void write_echo(std::ostream& os, const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const std::shared_ptr<const TimingAnalyzer> analyzer) {
write_timing_graph(os, tg);
write_timing_constraints(os, tc);
write_delay_model(os, tg, dc);
write_analysis_result(os, tg, analyzer);
}
void write_delay_model(std::ostream& os, const TimingGraph& tg, const DelayCalculator& dc) {
os << "delay_model:\n";
for(auto edge_id : tg.edges()) {
NodeId src_node = tg.edge_src_node(edge_id);
NodeId sink_node = tg.edge_sink_node(edge_id);
os << " edge: " << size_t(edge_id);
if(tg.node_type(src_node) == NodeType::CPIN && tg.node_type(sink_node) == NodeType::SINK) {
os << " setup_time: " << dc.setup_time(tg, edge_id).value();
os << " hold_time: " << dc.hold_time(tg, edge_id).value();
} else {
os << " min_delay: " << dc.min_edge_delay(tg, edge_id).value();
os << " max_delay: " << dc.max_edge_delay(tg, edge_id).value();
}
os << "\n";
}
os << "\n";
}
void write_timing_graph(std::ostream& os, const TimingGraph& tg) {
os << "timing_graph:" << "\n";
//We manually iterate to write the nodes in ascending order
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
os << " node: " << size_t(node_id) << " \\\n";
os << " type: " << tg.node_type(node_id) << " \\\n";
os << " in_edges:";
auto in_edges = tg.node_in_edges(node_id);
std::vector<EdgeId> edges(in_edges.begin(), in_edges.end());
std::sort(edges.begin(), edges.end()); //sort the edges for consistent output
for(EdgeId edge_id : edges) {
os << " " << size_t(edge_id) ;
}
os << " \\\n";
os << " out_edges:";
auto out_edges = tg.node_out_edges(node_id);
edges = std::vector<EdgeId>(out_edges.begin(), out_edges.end());
std::sort(edges.begin(), edges.end()); //sort the edges for consistent output
for(EdgeId edge_id : edges) {
os << " " << size_t(edge_id);
}
os << "\n";
}
//We manually iterate to write the edges in ascending order
for(size_t edge_idx = 0; edge_idx < tg.edges().size(); ++edge_idx) {
EdgeId edge_id(edge_idx);
os << " edge: " << size_t(edge_id) << " \\\n";
os << " type: " << tg.edge_type(edge_id) << " \\\n";
os << " src_node: " << size_t(tg.edge_src_node(edge_id)) << " \\\n";
os << " sink_node: " << size_t(tg.edge_sink_node(edge_id)) << " \\\n";
os << " disabled: ";
if(tg.edge_disabled(edge_id)) {
os << "true";
} else {
os << "false";
}
os << "\n";
}
os << "\n";
}
void write_timing_constraints(std::ostream& os, const TimingConstraints& tc) {
os << "timing_constraints:\n";
for(auto domain_id : tc.clock_domains()) {
os << " type: CLOCK domain: " << size_t(domain_id) << " name: \"" << tc.clock_domain_name(domain_id) << "\"\n";
}
for(auto domain_id : tc.clock_domains()) {
NodeId source_node_id = tc.clock_domain_source_node(domain_id);
if(source_node_id) {
os << " type: CLOCK_SOURCE node: " << size_t(source_node_id) << " domain: " << size_t(domain_id) << "\n";
}
}
for(auto node_id : tc.constant_generators()) {
os << " type: CONSTANT_GENERATOR node: " << size_t(node_id) << "\n";
}
for(auto kv : tc.input_constraints(DelayType::MAX)) {
auto node_id = kv.first;
auto domain_id = kv.second.domain;
auto constraint = kv.second.constraint;
if(constraint.valid()) {
os << " type: MAX_INPUT_CONSTRAINT node: " << size_t(node_id) << " domain: " << size_t(domain_id) << " constraint: " << constraint << "\n";
}
}
for(auto kv : tc.input_constraints(DelayType::MIN)) {
auto node_id = kv.first;
auto domain_id = kv.second.domain;
auto constraint = kv.second.constraint;
if(constraint.valid()) {
os << " type: MIN_INPUT_CONSTRAINT node: " << size_t(node_id) << " domain: " << size_t(domain_id) << " constraint: " << constraint << "\n";
}
}
for(auto kv : tc.output_constraints(DelayType::MAX)) {
auto node_id = kv.first;
auto domain_id = kv.second.domain;
auto constraint = kv.second.constraint;
if(constraint.valid()) {
os << " type: MAX_OUTPUT_CONSTRAINT node: " << size_t(node_id) << " domain: " << size_t(domain_id) << " constraint: " << constraint << "\n";
}
}
for(auto kv : tc.output_constraints(DelayType::MIN)) {
auto node_id = kv.first;
auto domain_id = kv.second.domain;
auto constraint = kv.second.constraint;
if(constraint.valid()) {
os << " type: MIN_OUTPUT_CONSTRAINT node: " << size_t(node_id) << " domain: " << size_t(domain_id) << " constraint: " << constraint << "\n";
}
}
for(auto kv : tc.setup_constraints()) {
auto key = kv.first;
auto constraint = kv.second;
if(constraint.valid()) {
os << " type: SETUP_CONSTRAINT";
os << " launch_domain: " << size_t(key.domain_pair.src_domain_id);
os << " capture_domain: " << size_t(key.domain_pair.sink_domain_id);
if (key.capture_node) {
os << " capture_node: " << size_t(key.capture_node);
} else {
os << " capture_node: -1";
}
os << " constraint: " << constraint;
os << "\n";
}
}
for(auto kv : tc.hold_constraints()) {
auto key = kv.first;
auto constraint = kv.second;
if(constraint.valid()) {
os << " type: HOLD_CONSTRAINT";
os << " launch_domain: " << size_t(key.domain_pair.src_domain_id);
os << " capture_domain: " << size_t(key.domain_pair.sink_domain_id);
if (key.capture_node) {
os << " capture_node: " << size_t(key.capture_node);
} else {
os << " capture_node: -1";
}
os << " constraint: " << constraint;
os << "\n";
}
}
for(auto kv : tc.setup_clock_uncertainties()) {
auto key = kv.first;
auto uncertainty = kv.second;
os << " type: SETUP_UNCERTAINTY";
os << " launch_domain: " << size_t(key.src_domain_id);
os << " capture_domain: " << size_t(key.sink_domain_id);
os << " uncertainty: " << uncertainty;
os << "\n";
}
for(auto kv : tc.hold_clock_uncertainties()) {
auto key = kv.first;
auto uncertainty = kv.second;
os << " type: HOLD_UNCERTAINTY";
os << " launch_domain: " << size_t(key.src_domain_id);
os << " capture_domain: " << size_t(key.sink_domain_id);
os << " uncertainty: " << uncertainty;
os << "\n";
}
for(auto kv : tc.source_latencies(ArrivalType::EARLY)) {
auto domain = kv.first;
auto latency = kv.second;
os << " type: EARLY_SOURCE_LATENCY";
os << " domain: " << size_t(domain);
os << " latency: " << latency;
os << "\n";
}
for(auto kv : tc.source_latencies(ArrivalType::LATE)) {
auto domain = kv.first;
auto latency = kv.second;
os << " type: LATE_SOURCE_LATENCY";
os << " domain: " << size_t(domain);
os << " latency: " << latency;
os << "\n";
}
os << "\n";
}
void write_analysis_result(std::ostream& os, const TimingGraph& tg, const std::shared_ptr<const TimingAnalyzer> analyzer) {
os << "analysis_result:\n";
auto setup_analyzer = std::dynamic_pointer_cast<const SetupTimingAnalyzer>(analyzer);
if(setup_analyzer) {
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
write_tags(os, "SETUP_DATA_ARRIVAL", setup_analyzer->setup_tags(node_id, TagType::DATA_ARRIVAL), node_id);
}
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
write_tags(os, "SETUP_DATA_REQUIRED", setup_analyzer->setup_tags(node_id, TagType::DATA_REQUIRED), node_id);
}
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
write_tags(os, "SETUP_LAUNCH_CLOCK", setup_analyzer->setup_tags(node_id, TagType::CLOCK_LAUNCH), node_id);
}
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
write_tags(os, "SETUP_CAPTURE_CLOCK", setup_analyzer->setup_tags(node_id, TagType::CLOCK_CAPTURE), node_id);
}
for(size_t edge_idx = 0; edge_idx < tg.edges().size(); ++edge_idx) {
EdgeId edge_id(edge_idx);
write_slacks(os, "SETUP_SLACK", setup_analyzer->setup_slacks(edge_id), edge_id);
}
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
write_slacks(os, "SETUP_SLACK", setup_analyzer->setup_slacks(node_id), node_id);
}
}
auto hold_analyzer = std::dynamic_pointer_cast<const HoldTimingAnalyzer>(analyzer);
if(hold_analyzer) {
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
write_tags(os, "HOLD_DATA_ARRIVAL", hold_analyzer->hold_tags(node_id, TagType::DATA_ARRIVAL), node_id);
}
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
write_tags(os, "HOLD_DATA_REQUIRED", hold_analyzer->hold_tags(node_id, TagType::DATA_REQUIRED), node_id);
}
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
write_tags(os, "HOLD_LAUNCH_CLOCK", hold_analyzer->hold_tags(node_id, TagType::CLOCK_LAUNCH), node_id);
}
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
write_tags(os, "HOLD_CAPTURE_CLOCK", hold_analyzer->hold_tags(node_id, TagType::CLOCK_CAPTURE), node_id);
}
for(size_t edge_idx = 0; edge_idx < tg.edges().size(); ++edge_idx) {
EdgeId edge_id(edge_idx);
write_slacks(os, "HOLD_SLACK", hold_analyzer->hold_slacks(edge_id), edge_id);
}
for(size_t node_idx = 0; node_idx < tg.nodes().size(); ++node_idx) {
NodeId node_id(node_idx);
write_slacks(os, "HOLD_SLACK", hold_analyzer->hold_slacks(node_id), node_id);
}
}
os << "\n";
}
void write_tags(std::ostream& os, const std::string& type, const TimingTags::tag_range tags, const NodeId node_id) {
for(const auto& tag : tags) {
TATUM_ASSERT(tag.type() != TagType::SLACK);
float time = tag.time().value();
if(!std::isnan(time)) {
os << " type: " << type;
os << " node: " << size_t(node_id);
os << " launch_domain: ";
if(tag.launch_clock_domain()) {
os << size_t(tag.launch_clock_domain());
} else {
os << "-1";
}
os << " capture_domain: ";
if(tag.capture_clock_domain()) {
os << size_t(tag.capture_clock_domain());
} else {
os << "-1";
}
os << " time: " << time;
os << "\n";
}
}
}
void write_slacks(std::ostream& os, const std::string& type, const TimingTags::tag_range tags, const EdgeId edge) {
for(const auto& tag : tags) {
TATUM_ASSERT(tag.type() == TagType::SLACK);
float time = tag.time().value();
if(!std::isnan(time)) {
os << " type: " << type;
os << " edge: " << size_t(edge);
os << " launch_domain: ";
if(tag.launch_clock_domain()) {
os << size_t(tag.launch_clock_domain());
} else {
os << "-1";
}
os << " capture_domain: ";
if(tag.capture_clock_domain()) {
os << size_t(tag.capture_clock_domain());
} else {
os << "-1";
}
os << " slack: " << time;
os << "\n";
}
}
}
void write_slacks(std::ostream& os, const std::string& type, const TimingTags::tag_range tags, const NodeId node) {
for(const auto& tag : tags) {
TATUM_ASSERT(tag.type() == TagType::SLACK);
float time = tag.time().value();
if(!std::isnan(time)) {
os << " type: " << type;
os << " node: " << size_t(node);
os << " launch_domain: ";
if(tag.launch_clock_domain()) {
os << size_t(tag.launch_clock_domain());
} else {
os << "-1";
}
os << " capture_domain: ";
if(tag.capture_clock_domain()) {
os << size_t(tag.capture_clock_domain());
} else {
os << "-1";
}
os << " slack: " << time;
os << "\n";
}
}
}
}

View File

@ -0,0 +1,20 @@
#pragma once
#include <memory>
#include <iostream>
#include <fstream>
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraintsFwd.hpp"
#include "tatum/timing_analyzers.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
namespace tatum {
void write_timing_graph(std::ostream& os, const TimingGraph& tg);
void write_timing_constraints(std::ostream& os, const TimingConstraints& tc);
void write_analysis_result(std::ostream& os, const TimingGraph& tg, const std::shared_ptr<const TimingAnalyzer> analyzer);
void write_delay_model(std::ostream& os, const TimingGraph& tg, const DelayCalculator& dc);
void write_echo(std::string filename, const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const std::shared_ptr<const TimingAnalyzer> analyzer);
void write_echo(std::ostream& os, const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const std::shared_ptr<const TimingAnalyzer> analyzer);
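//Hedged usage sketch (the filename is arbitrary): dump the graph, constraints,
//delay model and analysis results to a single echo file for debugging or
//golden-result comparison.
//
//  tatum::write_echo("timing.echo", tg, tc, delay_calc, analyzer); //'analyzer' is a std::shared_ptr to a (const) TimingAnalyzer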
}

View File

@ -0,0 +1,36 @@
#ifndef TATUM_ERROR_HPP
#define TATUM_ERROR_HPP
#include <stdexcept>
#include "tatum/TimingGraphFwd.hpp"
namespace tatum {
class Error : public std::runtime_error {
public:
//String only
explicit Error(const std::string& what_str)
: std::runtime_error(what_str) {}
//String and node
explicit Error(const std::string& what_str, const NodeId n)
: std::runtime_error(what_str)
, node(n) {}
//String and edge
explicit Error(const std::string& what_str, const EdgeId e)
: std::runtime_error(what_str)
, edge(e) {}
//String, node and edge
explicit Error(const std::string& what_str, const NodeId n, const EdgeId e)
: std::runtime_error(what_str)
, node(n)
, edge(e) {}
NodeId node; //The related node, may be invalid if unspecified
EdgeId edge; //The related edge, may be invalid if unspecified
};
}
#endif

View File

@ -0,0 +1,84 @@
#pragma once
#include "tatum/tags/TimingTags.hpp"
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/util/tatum_linear_map.hpp"
namespace tatum { namespace detail {
/** \class CommonAnalysisOps
*
* The common tag/slack storage and operations used by CommonAnalysisVisitor.
* The setup/hold specific behaviour (e.g. whether maximum or minimum edge delays are used,
* and whether maximum arrival/minimum required times are propagated through the timing graph)
* is defined by the setup/hold specific operation classes.
*
* \see HoldAnalysisOps
* \see SetupAnalysisOps
* \see CommonAnalysisVisitor
*/
class CommonAnalysisOps {
public:
CommonAnalysisOps(size_t num_nodes, size_t num_edges)
: node_tags_(num_nodes)
, edge_slacks_(num_edges)
, node_slacks_(num_nodes) {}
CommonAnalysisOps(const CommonAnalysisOps&) = delete;
CommonAnalysisOps(CommonAnalysisOps&&) = delete;
CommonAnalysisOps& operator=(const CommonAnalysisOps&) = delete;
CommonAnalysisOps& operator=(CommonAnalysisOps&&) = delete;
TimingTags::tag_range get_tags(const NodeId node_id) {
return node_tags_[node_id].tags();
}
TimingTags::tag_range get_tags(const NodeId node_id, TagType type) {
return node_tags_[node_id].tags(type);
}
TimingTags::tag_range get_tags(const NodeId node_id) const {
return node_tags_[node_id].tags();
}
TimingTags::tag_range get_tags(const NodeId node_id, TagType type) const {
return node_tags_[node_id].tags(type);
}
void add_tag(const NodeId node, const TimingTag& tag) {
node_tags_[node].add_tag(tag);
}
void reset_node(const NodeId node) {
node_tags_[node].clear();
node_slacks_[node].clear();
}
void merge_slack_tags(const EdgeId edge, const Time time, TimingTag ref_tag) {
ref_tag.set_type(TagType::SLACK);
edge_slacks_[edge].min(time, ref_tag.origin_node(), ref_tag);
}
void merge_slack_tags(const NodeId node, const Time time, TimingTag ref_tag) {
ref_tag.set_type(TagType::SLACK);
node_slacks_[node].min(time, ref_tag.origin_node(), ref_tag);
}
TimingTags::tag_range get_edge_slacks(const EdgeId edge) const {
return edge_slacks_[edge].tags(TagType::SLACK);
}
TimingTags::tag_range get_node_slacks(const NodeId node) const {
return node_slacks_[node].tags(TagType::SLACK);
}
void reset_edge(const EdgeId edge) {
edge_slacks_[edge].clear();
}
protected:
tatum::util::linear_map<NodeId,TimingTags> node_tags_;
tatum::util::linear_map<EdgeId,TimingTags> edge_slacks_;
tatum::util::linear_map<NodeId,TimingTags> node_slacks_;
};
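//Hedged sketch of how an analysis-specific ops class is expected to build on this
//common storage (the class below is illustrative, not the real SetupAnalysisOps):
//
//  class ExampleSetupOps : public CommonAnalysisOps {
//      public:
//          using CommonAnalysisOps::CommonAnalysisOps;
//          //Setup (long-path) analysis propagates the *maximum* edge delay
//          Time data_edge_delay(const DelayCalculator& dc, const TimingGraph& tg, EdgeId edge) const {
//              return dc.max_edge_delay(tg, edge);
//          }
//          //...remaining ops (tag merging, constraint lookups, etc.) omitted...
//  };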
}} //namespace

View File

@ -0,0 +1,628 @@
#ifndef TATUM_COMMON_ANALYSIS_VISITOR_HPP
#define TATUM_COMMON_ANALYSIS_VISITOR_HPP
#include "tatum/error.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
#include "tatum/tags/TimingTags.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
#include "tatum/graph_visitors/GraphVisitor.hpp"
namespace tatum { namespace detail {
/** \file
*
* Common analysis functionality for both setup and hold analysis.
*/
/** \class CommonAnalysisVisitor
*
* A class satisfying the GraphVisitor concept, which contains common
* node and edge processing code used by both setup and hold analysis.
*
* \see GraphVisitor
*
* \tparam AnalysisOps a class defining the setup/hold specific operations
* \see SetupAnalysisOps
* \see HoldAnalysisOps
*/
template<class AnalysisOps>
class CommonAnalysisVisitor : public GraphVisitor {
public:
CommonAnalysisVisitor(size_t num_tags, size_t num_slacks)
: ops_(num_tags, num_slacks) { }
void do_reset_node(const NodeId node_id) override { ops_.reset_node(node_id); }
void do_reset_edge(const EdgeId edge_id) override { ops_.reset_edge(edge_id); }
bool do_arrival_pre_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const NodeId node_id) override;
bool do_required_pre_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const NodeId node_id) override;
void do_arrival_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id) override;
void do_required_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id) override;
void do_slack_traverse_node(const TimingGraph& tg, const DelayCalculator& dc, const NodeId node) override;
protected:
AnalysisOps ops_;
private:
void do_arrival_traverse_edge(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id, const EdgeId edge_id);
void do_required_traverse_edge(const TimingGraph& tg, const DelayCalculator& dc, const NodeId node_id, const EdgeId edge_id);
void do_slack_traverse_edge(const TimingGraph& tg, const DelayCalculator& dc, const EdgeId edge);
void mark_sink_required_times(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node);
bool should_propagate_clocks(const TimingGraph& tg, const TimingConstraints& tc, const EdgeId edge_id) const;
bool should_propagate_clock_launch_tags(const TimingGraph& tg, const EdgeId edge_id) const;
bool should_propagate_clock_capture_tags(const TimingGraph& tg, const EdgeId edge_id) const;
bool should_propagate_data(const TimingGraph& tg, const EdgeId edge_id) const;
bool should_calculate_slack(const TimingTag& src_tag, const TimingTag& sink_tag) const;
bool is_clock_data_launch_edge(const TimingGraph& tg, const EdgeId edge_id) const;
bool is_clock_data_capture_edge(const TimingGraph& tg, const EdgeId edge_id) const;
};
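//Hedged note: this visitor is not normally invoked directly. A concrete analyzer
//instantiates it with an ops class (e.g. CommonAnalysisVisitor<ExampleSetupOps>,
//name illustrative) and a graph walker drives it, as seen elsewhere in this commit:
//
//  walker.do_required_pre_traversal(tg, tc, visitor);
//  walker.do_required_traversal(tg, tc, dc, visitor);
//  walker.do_update_slack(tg, dc, visitor);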
/*
* Pre-traversal
*/
template<class AnalysisOps>
bool CommonAnalysisVisitor<AnalysisOps>::do_arrival_pre_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const NodeId node_id) {
//Logical Input
//We expect this function to only be called on nodes in the first level of the timing graph
//These nodes must have no un-disabled input edges (else they shouldn't be in the first level).
//In the normal case (primary input) there are no incoming edges. However if set_disable_timing
//was used it may be that the edges were explicitly disabled. We therefore verify that there are
//no un-disabled edges in the fanin of the current node.
TATUM_ASSERT_MSG(tg.node_num_active_in_edges(node_id) == 0, "Logical input has non-disabled input edges: timing graph not levelized.");
//
//We now generate the various clock/data launch tags associated with the arrival time traversal
//
NodeType node_type = tg.node_type(node_id);
bool node_constrained = false;
if(tc.node_is_constant_generator(node_id)) {
//We propagate the tags from constant generators to ensure any sinks driven
//only by constant generators are recorded as constrained.
//
//We use a special tag to initialize constant generators which gets overwritten
//by any non-constant tag at downstream nodes
TimingTag const_gen_tag = ops_.const_gen_tag();
ops_.add_tag(node_id, const_gen_tag);
node_constrained = true;
} else {
TATUM_ASSERT(node_type == NodeType::SOURCE);
if(tc.node_is_clock_source(node_id)) {
//Generate the appropriate clock tag
TATUM_ASSERT_MSG(ops_.get_tags(node_id, TagType::CLOCK_LAUNCH).size() == 0, "Uninitialized clock source should have no launch clock tags");
TATUM_ASSERT_MSG(ops_.get_tags(node_id, TagType::CLOCK_CAPTURE).size() == 0, "Uninitialized clock source should have no capture clock tags");
//Find the domain of this node (since it is a source)
DomainId domain_id = tc.node_clock_domain(node_id);
TATUM_ASSERT(domain_id);
//Initialize a clock launch tag from this to any capture domain
//
//Note: we assume that edge counting has set the effective period constraint assuming a
//launch edge at time zero + source latency. This means we don't need to do anything
//special for clocks with rising edges after time zero.
Time launch_source_latency = ops_.launch_source_latency(tc, domain_id);
TimingTag launch_tag = TimingTag(launch_source_latency,
domain_id,
DomainId::INVALID(), //Any capture
NodeId::INVALID(), //Origin
TagType::CLOCK_LAUNCH);
//Add the launch tag
ops_.add_tag(node_id, launch_tag);
//Initialize the clock capture tags from any valid launch domain to this domain
//
//Note that we enumerate all pairs of valid launch domains for the current domain
//(which is now treated as the capture domain), since each pair may have different constraints
for(DomainId launch_domain_id : tc.clock_domains()) {
if(tc.should_analyze(launch_domain_id, domain_id)) {
//Initialize the clock capture tag with the constraint, including the effect of any source latency
//
//Note: We assume that this period constraint has been resolved by edge counting for this
//domain pair. Note that it does not include the effect of clock uncertainty, which is handled
//when the capture tag is converted into a data-arrival tag.
//
//Also note that this is the default clock constraint. If there is a different per capture node
//constraint this is also handled when setting the required time.
Time clock_constraint = ops_.clock_constraint(tc, launch_domain_id, domain_id);
Time capture_source_latency = ops_.capture_source_latency(tc, domain_id);
TimingTag capture_tag = TimingTag(Time(capture_source_latency) + Time(clock_constraint),
launch_domain_id,
domain_id,
NodeId::INVALID(), //Origin
TagType::CLOCK_CAPTURE);
ops_.add_tag(node_id, capture_tag);
node_constrained = true;
}
}
} else {
//A standard primary input, generate the appropriate data tags
TATUM_ASSERT_MSG(ops_.get_tags(node_id, TagType::DATA_ARRIVAL).size() == 0, "Primary input already has data tags");
auto input_constraints = ops_.input_constraints(tc, node_id);
if(!input_constraints.empty()) { //Some inputs may be unconstrained, so do not create tags for them
DomainId domain_id = tc.node_clock_domain(node_id);
TATUM_ASSERT(domain_id);
//The external clock may have latency
Time launch_source_latency = ops_.launch_source_latency(tc, domain_id);
//An input constraint means there is 'input_constraint' delay from when an external
//signal is launched by its clock (external to the chip) until it arrives at the
//primary input
Time input_constraint = ops_.input_constraint(tc, node_id, domain_id);
TATUM_ASSERT(input_constraint.valid());
//Initialize a data tag based on input delay constraint
TimingTag input_tag = TimingTag(launch_source_latency + input_constraint,
domain_id,
DomainId::INVALID(),
NodeId::INVALID(), //Origin
TagType::DATA_ARRIVAL);
ops_.add_tag(node_id, input_tag);
node_constrained = true;
}
}
}
return node_constrained;
}
template<class AnalysisOps>
bool CommonAnalysisVisitor<AnalysisOps>::do_required_pre_traverse_node(const TimingGraph& tg, const TimingConstraints& /*tc*/, const NodeId node_id) {
NodeType node_type = tg.node_type(node_id);
TATUM_ASSERT(node_type == NodeType::SINK);
return is_constrained(node_type, ops_.get_tags(node_id));
}
/*
* Arrival Time Operations
*/
template<class AnalysisOps>
void CommonAnalysisVisitor<AnalysisOps>::do_arrival_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, NodeId node_id) {
//Pull from upstream sources to current node
for(EdgeId edge_id : tg.node_in_edges(node_id)) {
if(tg.edge_disabled(edge_id)) continue;
do_arrival_traverse_edge(tg, tc, dc, node_id, edge_id);
}
if(tg.node_type(node_id) == NodeType::SINK) {
mark_sink_required_times(tg, tc, dc, node_id);
}
}
template<class AnalysisOps>
void CommonAnalysisVisitor<AnalysisOps>::do_arrival_traverse_edge(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id, const EdgeId edge_id) {
//Pulling values from upstream source node
NodeId src_node_id = tg.edge_src_node(edge_id);
if(should_propagate_clocks(tg, tc, edge_id)) {
/*
* Clock tags
*/
//Propagate the clock tags through the clock network
//The launch tags
if(should_propagate_clock_launch_tags(tg, edge_id)) {
TimingTags::tag_range src_launch_clk_tags = ops_.get_tags(src_node_id, TagType::CLOCK_LAUNCH);
if(!src_launch_clk_tags.empty()) {
const Time clk_launch_edge_delay = ops_.launch_clock_edge_delay(dc, tg, edge_id);
for(const TimingTag& src_launch_clk_tag : src_launch_clk_tags) {
//Standard propagation through the clock network
Time new_arr = src_launch_clk_tag.time() + clk_launch_edge_delay;
ops_.merge_arr_tags(node_id, new_arr, src_node_id, src_launch_clk_tag);
}
}
}
//The capture tags
if(should_propagate_clock_capture_tags(tg, edge_id)) {
TimingTags::tag_range src_capture_clk_tags = ops_.get_tags(src_node_id, TagType::CLOCK_CAPTURE);
if(!src_capture_clk_tags.empty()) {
const Time clk_capture_edge_delay = ops_.capture_clock_edge_delay(dc, tg, edge_id);
for(const TimingTag& src_capture_clk_tag : src_capture_clk_tags) {
//Standard propagation through the clock network
ops_.merge_arr_tags(node_id, src_capture_clk_tag.time() + clk_capture_edge_delay, src_node_id, src_capture_clk_tag);
}
}
}
}
/*
* Data Arrival tags
*/
if(is_clock_data_launch_edge(tg, edge_id)) {
//Convert the launch clock into a data arrival
//We convert the clock arrival time at the upstream node into a data
//arrival time at this node (since the clock's arrival launches the data).
TATUM_ASSERT_SAFE(tg.node_type(node_id) == NodeType::SOURCE);
TimingTags::tag_range src_launch_clk_tags = ops_.get_tags(src_node_id, TagType::CLOCK_LAUNCH);
if(!src_launch_clk_tags.empty()) {
const Time launch_edge_delay = ops_.launch_clock_edge_delay(dc, tg, edge_id);
for(const TimingTag& src_launch_clk_tag : src_launch_clk_tags) {
//Convert clock launch into data arrival
TimingTag data_arr_tag = src_launch_clk_tag;
data_arr_tag.set_type(TagType::DATA_ARRIVAL);
Time arr_time = src_launch_clk_tag.time() + launch_edge_delay;
//Mark propagated launch time as a DATA tag
ops_.merge_arr_tags(node_id,
arr_time,
NodeId::INVALID(), //Origin
data_arr_tag);
}
}
}
if(should_propagate_data(tg, edge_id)) {
//Standard data path propagation
TimingTags::tag_range src_data_tags = ops_.get_tags(src_node_id, TagType::DATA_ARRIVAL);
if(!src_data_tags.empty()) {
const Time edge_delay = ops_.data_edge_delay(dc, tg, edge_id);
TATUM_ASSERT_SAFE(edge_delay.valid());
for(const TimingTag& src_data_tag : src_data_tags) {
Time new_arr = src_data_tag.time() + edge_delay;
ops_.merge_arr_tags(node_id, new_arr, src_node_id, src_data_tag);
}
}
}
//NOTE: we do not handle clock capture edges (which create required times) here, but in
// mark_sink_required_times(), after all edges have been processed (i.e. all arrival times
// set)
//
//To calculate the required times at a node we must know all the arrival times at the node.
//Since we don't know the order the incoming edges are processed, we don't know whether all
//arrival times have been set until *after* all edges have been processed.
//
//As a result we set the required times only after all the edges have been processed
}
/*
* Required Time Operations
*/
template<class AnalysisOps>
void CommonAnalysisVisitor<AnalysisOps>::do_required_traverse_node(const TimingGraph& tg, const TimingConstraints& /*tc*/, const DelayCalculator& dc, const NodeId node_id) {
//Don't propagate required times through the clock network
if(tg.node_type(node_id) == NodeType::CPIN) return;
//Pull from downstream sinks to current node
for(EdgeId edge_id : tg.node_out_edges(node_id)) {
if(tg.edge_disabled(edge_id)) continue;
do_required_traverse_edge(tg, dc, node_id, edge_id);
}
}
template<class AnalysisOps>
void CommonAnalysisVisitor<AnalysisOps>::do_required_traverse_edge(const TimingGraph& tg, const DelayCalculator& dc, const NodeId node_id, const EdgeId edge_id) {
//Pulling values from downstream sink node
NodeId sink_node_id = tg.edge_sink_node(edge_id);
TimingTags::tag_range sink_data_tags = ops_.get_tags(sink_node_id, TagType::DATA_REQUIRED);
if(!sink_data_tags.empty()) {
const Time& edge_delay = ops_.data_edge_delay(dc, tg, edge_id);
TATUM_ASSERT_SAFE(edge_delay.valid());
for(const TimingTag& sink_tag : sink_data_tags) {
//We only propagate the required time if we have a valid matching arrival time
ops_.merge_req_tags(node_id, sink_tag.time() - edge_delay, sink_node_id, sink_tag, true);
}
}
}
template<class AnalysisOps>
void CommonAnalysisVisitor<AnalysisOps>::do_slack_traverse_node(const TimingGraph& tg, const DelayCalculator& dc, const NodeId node) {
//Calculate the slack for each edge
for(const EdgeId edge : tg.node_in_edges(node)) {
do_slack_traverse_edge(tg, dc, edge);
}
//Calculate the slacks at each node
for(const TimingTag& arr_tag : ops_.get_tags(node, TagType::DATA_ARRIVAL)) {
for(const TimingTag& req_tag : ops_.get_tags(node, TagType::DATA_REQUIRED)) {
if(!should_calculate_slack(arr_tag, req_tag)) continue;
Time slack_value = ops_.calculate_slack(req_tag.time(), arr_tag.time());
ops_.merge_slack_tags(node, slack_value, req_tag);
}
}
}
template<class AnalysisOps>
void CommonAnalysisVisitor<AnalysisOps>::do_slack_traverse_edge(const TimingGraph& tg, const DelayCalculator& dc, const EdgeId edge) {
NodeId src_node = tg.edge_src_node(edge);
NodeId sink_node = tg.edge_sink_node(edge);
auto src_arr_tags = ops_.get_tags(src_node, TagType::DATA_ARRIVAL);
auto sink_req_tags = ops_.get_tags(sink_node, TagType::DATA_REQUIRED);
Time edge_delay;
if(is_clock_data_launch_edge(tg, edge)) {
edge_delay = ops_.launch_clock_edge_delay(dc, tg, edge);
} else if(is_clock_data_capture_edge(tg, edge)) {
edge_delay = ops_.capture_clock_edge_delay(dc, tg, edge);
} else {
edge_delay = ops_.data_edge_delay(dc, tg, edge);
}
for(const tatum::TimingTag& src_arr_tag : src_arr_tags) {
for(const tatum::TimingTag& sink_req_tag : sink_req_tags) {
if(!should_calculate_slack(src_arr_tag, sink_req_tag)) continue;
Time slack_value = sink_req_tag.time() - src_arr_tag.time() - edge_delay;
ops_.merge_slack_tags(edge, slack_value, sink_req_tag);
}
}
}
template<class AnalysisOps>
void CommonAnalysisVisitor<AnalysisOps>::mark_sink_required_times(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id) {
//Mark the required times of the current sink node
TATUM_ASSERT(tg.node_type(node_id) == NodeType::SINK);
//Note: since we add tags at the current node (and the tags are all stored together),
//we must *copy* the data arrival tags before adding any new tags (since adding new
//tags may invalidate the old tag references)
auto data_arr_range = ops_.get_tags(node_id, TagType::DATA_ARRIVAL);
std::vector<TimingTag> node_data_arr_tags(data_arr_range.begin(), data_arr_range.end());
EdgeId clock_capture_edge = tg.node_clock_capture_edge(node_id);
if(clock_capture_edge) {
//Required time at sink FF
NodeId src_node_id = tg.edge_src_node(clock_capture_edge);
TimingTags::tag_range src_capture_clk_tags = ops_.get_tags(src_node_id, TagType::CLOCK_CAPTURE);
const Time capture_edge_delay = ops_.capture_clock_edge_delay(dc, tg, clock_capture_edge);
for(const TimingTag& src_capture_clk_tag : src_capture_clk_tags) {
DomainId clock_launch_domain = src_capture_clk_tag.launch_clock_domain();
DomainId clock_capture_domain = src_capture_clk_tag.capture_clock_domain();
for(const TimingTag& node_data_arr_tag : node_data_arr_tags) {
DomainId data_launch_domain = node_data_arr_tag.launch_clock_domain();
if(is_const_gen_tag(node_data_arr_tag)) {
//A constant generator tag. Required time is not terribly well defined,
//so just use the capture clock tag values since we never look at these
//tags except to check that any downstream nodes have been constrained
data_launch_domain = src_capture_clk_tag.capture_clock_domain();
}
//We produce fully specified capture clock tags (both launch and capture), so we only want
//to consider the capture clock tag which matches the data launch domain
bool same_launch_domain = (data_launch_domain == clock_launch_domain);
//We only want to analyze paths between domains where a valid constraint has been specified
bool valid_launch_capture_pair = tc.should_analyze(data_launch_domain, clock_capture_domain, node_id);
if(same_launch_domain && valid_launch_capture_pair) {
//We only set a required time if the source domain actually reaches this sink
//domain. This is indicated by the presence of an arrival tag (which should have
//a valid arrival time).
TATUM_ASSERT(node_data_arr_tag.time().valid());
//If there is a per-sink override for the clock constraint we need to adjust the clock
//arrival time from the default
Time clock_constraint_offset = ops_.clock_constraint(tc, data_launch_domain, clock_capture_domain, node_id)
- ops_.clock_constraint(tc, data_launch_domain, clock_capture_domain);
//We apply the clock uncertainty to the generated required time tag
Time clock_uncertainty = ops_.clock_uncertainty(tc, data_launch_domain, clock_capture_domain);
Time req_time = src_capture_clk_tag.time() //Latency + propagated clock network delay to CPIN
+ clock_constraint_offset //Period constraint adjustment
+ capture_edge_delay //CPIN to sink delay (Thld, or Tsu)
+ Time(clock_uncertainty); //Clock period uncertainty
TimingTag node_data_req_tag(req_time,
data_launch_domain,
clock_capture_domain,
NodeId::INVALID(), //Origin
TagType::DATA_REQUIRED);
ops_.add_tag(node_id, node_data_req_tag);
}
}
}
} else {
//Must be a primary-output sink, need to set required tags based on output constraints
DomainId io_capture_domain = tc.node_clock_domain(node_id);
//Any constrained primary outputs should have a specified clock domain
// Note that some outputs may not be constrained and hence should not get required times
if(io_capture_domain) {
//An output constraint means there is an output_constraint delay outside the chip;
//as a result, signals need to reach the primary output at least output_constraint
//before the capture clock.
//
//Hence we use a negative output constraint value to subtract the output constraint
//from the target clock constraint
Time output_constraint = -ops_.output_constraint(tc, node_id, io_capture_domain);
if (output_constraint.valid()) {
//Since there is no propagated clock tag to primary outputs, we need to account for
//the capture source clock latency
Time capture_clock_source_latency = ops_.capture_source_latency(tc, io_capture_domain);
for(const TimingTag& node_data_arr_tag : node_data_arr_tags) {
DomainId data_launch_domain = node_data_arr_tag.launch_clock_domain();
if(is_const_gen_tag(node_data_arr_tag)) {
//A constant generator tag. Required time is not terribly well defined,
//so just use the inter-domain values since we never look at these
//tags except to check that any downstream nodes have been constrained
data_launch_domain = io_capture_domain;
}
//Should we be analyzing paths between these two domains?
if(tc.should_analyze(data_launch_domain, io_capture_domain, node_id)) {
//We only set a required time if the source domain actually reaches this sink
//domain. This is indicated by the presence of an arrival tag (which should have
//a valid arrival time).
TATUM_ASSERT(node_data_arr_tag.time().valid());
Time constraint = ops_.clock_constraint(tc, data_launch_domain, io_capture_domain, node_id);
Time clock_uncertainty = ops_.clock_uncertainty(tc, data_launch_domain, io_capture_domain);
//Calculate the required time
Time req_time = Time(constraint) //Period constraint
+ Time(capture_clock_source_latency) //Latency from true clock source to def'n point
+ Time(output_constraint) //Output delay
+ Time(clock_uncertainty); //Clock period uncertainty
TimingTag node_data_req_tag(req_time,
data_launch_domain,
io_capture_domain,
NodeId::INVALID(), //Origin
TagType::DATA_REQUIRED);
ops_.add_tag(node_id, node_data_req_tag);
}
}
}
}
}
}
template<class AnalysisOps>
bool CommonAnalysisVisitor<AnalysisOps>::should_propagate_clocks(const TimingGraph& tg, const TimingConstraints& tc, const EdgeId edge_id) const {
//We want to propagate clock tags through the arbitrary nodes making up the clock network until
//we hit another source node (i.e. a FF's output source).
//
//To allow tags to propagate from the original source (i.e. the input clock pin) we also allow
//propagation from defined clock sources
NodeId src_node_id = tg.edge_src_node(edge_id);
NodeId sink_node_id = tg.edge_sink_node(edge_id);
if (tg.node_type(sink_node_id) != NodeType::SOURCE) {
//Not a source, allow propagation
if (tc.node_is_clock_source(src_node_id)) {
//The source is a clock source
TATUM_ASSERT_MSG(tg.node_type(src_node_id) == NodeType::SOURCE, "Only SOURCEs can be clock sources");
TATUM_ASSERT_MSG(tg.node_in_edges(src_node_id).empty(), "Clock sources should have no incoming edges");
}
return true;
}
return false;
}
template<class AnalysisOps>
bool CommonAnalysisVisitor<AnalysisOps>::should_propagate_clock_launch_tags(const TimingGraph& tg, const EdgeId edge_id) const {
return !is_clock_data_capture_edge(tg, edge_id);
}
template<class AnalysisOps>
bool CommonAnalysisVisitor<AnalysisOps>::should_propagate_clock_capture_tags(const TimingGraph& tg, const EdgeId edge_id) const {
NodeId sink_node = tg.edge_sink_node(edge_id);
return tg.node_type(sink_node) != NodeType::SINK;
}
template<class AnalysisOps>
bool CommonAnalysisVisitor<AnalysisOps>::is_clock_data_launch_edge(const TimingGraph& tg, const EdgeId edge_id) const {
NodeId edge_src_node = tg.edge_src_node(edge_id);
NodeId edge_sink_node = tg.edge_sink_node(edge_id);
return (tg.node_type(edge_src_node) == NodeType::CPIN) && (tg.node_type(edge_sink_node) == NodeType::SOURCE);
}
template<class AnalysisOps>
bool CommonAnalysisVisitor<AnalysisOps>::is_clock_data_capture_edge(const TimingGraph& tg, const EdgeId edge_id) const {
NodeId edge_src_node = tg.edge_src_node(edge_id);
NodeId edge_sink_node = tg.edge_sink_node(edge_id);
return (tg.node_type(edge_src_node) == NodeType::CPIN) && (tg.node_type(edge_sink_node) == NodeType::SINK);
}
template<class AnalysisOps>
bool CommonAnalysisVisitor<AnalysisOps>::should_propagate_data(const TimingGraph& tg, const EdgeId edge_id) const {
//We want to propagate data tags unless they re-enter the clock network
NodeId src_node_id = tg.edge_src_node(edge_id);
NodeType src_node_type = tg.node_type(src_node_id);
if (src_node_type != NodeType::CPIN) {
//Do not allow data tags to propagate through clock pins
return true;
}
return false;
}
template<class AnalysisOps>
bool CommonAnalysisVisitor<AnalysisOps>::should_calculate_slack(const TimingTag& src_tag, const TimingTag& sink_tag) const {
TATUM_ASSERT_SAFE(src_tag.type() == TagType::DATA_ARRIVAL && sink_tag.type() == TagType::DATA_REQUIRED);
//NOTE: we do not need to check the constraints to determine whether this domain pair should be analyzed,
// this check has already been done when we created the DATA_REQUIRED tags (i.e. sink tags in this context),
// ensuring we only calculate slack for valid domain pairs (otherwise the DATA_REQUIRED tag would not exist)
return src_tag.launch_clock_domain() == sink_tag.launch_clock_domain();
}
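//Illustrative sketch (not part of the library): the edge slack arithmetic performed by
//do_slack_traverse_edge() above, using plain doubles and made-up values rather than
//tatum::Time. For a setup-style analysis the slack on an edge is the required time at
//the edge's sink, minus the arrival time at its source, minus the edge delay.
inline double example_setup_edge_slack() {
    double sink_required = 10.0; //Hypothetical DATA_REQUIRED time at the edge sink
    double src_arrival   =  6.5; //Hypothetical DATA_ARRIVAL time at the edge source
    double edge_delay    =  2.0; //Hypothetical edge delay
    return sink_required - src_arrival - edge_delay; //1.5: positive slack, timing is met
}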
}} //namespace
#endif

View File

@ -0,0 +1,23 @@
#pragma once
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/TimingConstraintsFwd.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
namespace tatum {
class GraphVisitor {
public:
virtual ~GraphVisitor() {}
virtual void do_reset_node(const NodeId node_id) = 0;
virtual void do_reset_edge(const EdgeId edge_id) = 0;
virtual bool do_arrival_pre_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const NodeId node_id) = 0;
virtual bool do_required_pre_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const NodeId node_id) = 0;
virtual void do_arrival_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id) = 0;
virtual void do_required_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id) = 0;
virtual void do_slack_traverse_node(const TimingGraph& tg, const DelayCalculator& dc, const NodeId node) = 0;
};
}

View File

@ -0,0 +1,113 @@
#pragma once
#include "tatum/graph_visitors/CommonAnalysisOps.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
namespace tatum { namespace detail {
/** \class HoldAnalysisOps
*
* The operations for CommonAnalysisVisitor to perform hold analysis.
 * The operations are similar to those used for setup analysis, except that minimum edge delays
 * are used, and the minimum arrival times (and maximum required times) are propagated through
* the timing graph.
*
* \see SetupAnalysisOps
* \see CommonAnalysisVisitor
*/
class HoldAnalysisOps : public CommonAnalysisOps {
public:
HoldAnalysisOps(size_t num_tags, size_t num_slacks)
: CommonAnalysisOps(num_tags, num_slacks) {}
Time clock_constraint(const TimingConstraints& tc, const DomainId src_id, const DomainId sink_id) {
return tc.hold_constraint(src_id, sink_id, NodeId::INVALID());
}
Time clock_constraint(const TimingConstraints& tc, const DomainId src_id, const DomainId sink_id, const NodeId capture_node) {
return tc.hold_constraint(src_id, sink_id, capture_node);
}
Time clock_uncertainty(const TimingConstraints& tc, const DomainId src_id, const DomainId sink_id) {
//Hold analysis, so late capture clock arrival is pessimistic
return +tc.hold_clock_uncertainty(src_id, sink_id);
}
Time launch_source_latency(const TimingConstraints& tc, const DomainId domain) {
//For pessimistic hold analysis launch occurs early
return tc.source_latency(domain, ArrivalType::EARLY);
}
Time capture_source_latency(const TimingConstraints& tc, const DomainId domain) {
//For pessimistic hold analysis capture occurs late
return tc.source_latency(domain, ArrivalType::LATE);
}
Time input_constraint(const TimingConstraints& tc, const NodeId node, const DomainId domain) {
return tc.input_constraint(node, domain, DelayType::MIN);
}
auto input_constraints(const TimingConstraints& tc, const NodeId node) {
return tc.input_constraints(node, DelayType::MIN);
}
Time output_constraint(const TimingConstraints& tc, const NodeId node, const DomainId domain) {
return tc.output_constraint(node, domain, DelayType::MIN);
}
auto output_constraints(const TimingConstraints& tc, const NodeId node) {
return tc.output_constraints(node, DelayType::MIN);
}
TimingTag const_gen_tag() { return TimingTag::CONST_GEN_TAG_HOLD(); }
void merge_req_tags(const NodeId node, const Time time, const NodeId origin, const TimingTag& ref_tag, bool arrival_must_be_valid=false) {
node_tags_[node].max(time, origin, ref_tag, arrival_must_be_valid);
}
void merge_arr_tags(const NodeId node, const Time time, const NodeId origin, const TimingTag& ref_tag) {
node_tags_[node].min(time, origin, ref_tag);
}
Time data_edge_delay(const DelayCalculator& dc, const TimingGraph& tg, const EdgeId edge_id) {
Time delay = dc.min_edge_delay(tg, edge_id);
TATUM_ASSERT_MSG(delay.value() >= 0., "Data edge delay expected to be positive");
return delay;
}
Time launch_clock_edge_delay(const DelayCalculator& dc, const TimingGraph& tg, const EdgeId edge_id) {
Time delay = dc.min_edge_delay(tg, edge_id);
TATUM_ASSERT_MSG(delay.value() >= 0., "Launch clock edge delay expected to be positive");
return delay;
}
Time capture_clock_edge_delay(const DelayCalculator& dc, const TimingGraph& tg, const EdgeId edge_id) {
NodeId src_node = tg.edge_src_node(edge_id);
NodeId sink_node = tg.edge_sink_node(edge_id);
if(tg.node_type(src_node) == NodeType::CPIN && tg.node_type(sink_node) == NodeType::SINK) {
Time thld = dc.hold_time(tg, edge_id);
TATUM_ASSERT_MSG(!std::isnan(thld.value()), "Hold Time (Thld) expected to be a numeric value (not NaN)");
//The hold time is returned as a positive value, since it is placed on the clock path
//(instead of the data path).
return thld;
} else {
Time tcq = dc.min_edge_delay(tg, edge_id);
TATUM_ASSERT_MSG(tcq.value() >= 0., "Clock-to-q delay (Tcq) expected to be positive");
return tcq;
}
}
Time calculate_slack(const Time required_time, const Time arrival_time) {
//Hold requires the arrival to occur *after* the required time, so
//slack is the amount of arrival time left after the required time; meaning
//we subtract the required time from the arrival time to get the hold slack
return arrival_time - required_time;
}
};
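//Illustrative sketch (not part of the library): the hold slack convention implemented by
//calculate_slack() above, using plain doubles and made-up values. A hold check requires
//the data to arrive *after* the hold required time, so slack is arrival minus required,
//the opposite ordering to the setup slack.
inline double example_hold_slack() {
    double data_arrival  = 1.2; //Hypothetical earliest data arrival at the sink
    double hold_required = 0.4; //Hypothetical hold required time at the sink
    return data_arrival - hold_required; //0.8: positive slack, no hold violation
}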
}} //namespace

View File

@ -0,0 +1,115 @@
#pragma once
#include "tatum/graph_visitors/CommonAnalysisOps.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
namespace tatum { namespace detail {
/** \class SetupAnalysisOps
*
* The operations for CommonAnalysisVisitor to perform setup analysis.
* The setup analysis operations define that maximum edge delays are used, and that the
 * maximum arrival times (and minimum required times) are propagated through the timing graph.
*
* \see HoldAnalysisOps
* \see CommonAnalysisVisitor
*/
class SetupAnalysisOps : public CommonAnalysisOps {
public:
SetupAnalysisOps(size_t num_tags, size_t num_slacks)
: CommonAnalysisOps(num_tags, num_slacks) {}
Time clock_constraint(const TimingConstraints& tc, const DomainId src_id, const DomainId sink_id) {
return tc.setup_constraint(src_id, sink_id, NodeId::INVALID());
}
Time clock_constraint(const TimingConstraints& tc, const DomainId src_id, const DomainId sink_id, const NodeId capture_node) {
return tc.setup_constraint(src_id, sink_id, capture_node);
}
Time clock_uncertainty(const TimingConstraints& tc, const DomainId src_id, const DomainId sink_id) {
//Setup analysis, so early capture clock arrival is pessimistic
return -tc.setup_clock_uncertainty(src_id, sink_id);
}
Time launch_source_latency(const TimingConstraints& tc, const DomainId domain) {
//For pessimistic setup analysis launch occurs late
return tc.source_latency(domain, ArrivalType::LATE);
}
Time capture_source_latency(const TimingConstraints& tc, const DomainId domain) {
//For pessimistic setup analysis capture occurs early
return tc.source_latency(domain, ArrivalType::EARLY);
}
Time input_constraint(const TimingConstraints& tc, const NodeId node, const DomainId domain) {
return tc.input_constraint(node, domain, DelayType::MAX);
}
auto input_constraints(const TimingConstraints& tc, const NodeId node) {
return tc.input_constraints(node, DelayType::MAX);
}
Time output_constraint(const TimingConstraints& tc, const NodeId node, const DomainId domain) {
return tc.output_constraint(node, domain, DelayType::MAX);
}
auto output_constraints(const TimingConstraints& tc, const NodeId node) {
return tc.output_constraints(node, DelayType::MAX);
}
TimingTag const_gen_tag() { return TimingTag::CONST_GEN_TAG_SETUP(); }
void merge_req_tags(const NodeId node, const Time time, const NodeId origin, const TimingTag& ref_tag, bool arrival_must_be_valid=false) {
node_tags_[node].min(time, origin, ref_tag, arrival_must_be_valid);
}
void merge_arr_tags(const NodeId node, const Time time, const NodeId origin, const TimingTag& ref_tag) {
node_tags_[node].max(time, origin, ref_tag);
}
Time data_edge_delay(const DelayCalculator& dc, const TimingGraph& tg, const EdgeId edge_id) {
Time delay = dc.max_edge_delay(tg, edge_id);
TATUM_ASSERT_MSG(delay.value() >= 0., "Data edge delay expected to be positive");
return delay;
}
Time launch_clock_edge_delay(const DelayCalculator& dc, const TimingGraph& tg, const EdgeId edge_id) {
Time delay = dc.max_edge_delay(tg, edge_id);
TATUM_ASSERT_MSG(delay.value() >= 0., "Launch clock edge delay expected to be positive");
return delay;
}
Time capture_clock_edge_delay(const DelayCalculator& dc, const TimingGraph& tg, const EdgeId edge_id) {
NodeId src_node = tg.edge_src_node(edge_id);
NodeId sink_node = tg.edge_sink_node(edge_id);
if(tg.node_type(src_node) == NodeType::CPIN && tg.node_type(sink_node) == NodeType::SINK) {
Time tsu = dc.setup_time(tg, edge_id);
TATUM_ASSERT_MSG(!std::isnan(tsu.value()), "Setup Time (Tsu) expected to be a numeric value (not NaN)");
//The setup time is returned as a negative value, since it is placed on the clock path
//(instead of the data path).
return -tsu;
} else {
Time tcq = dc.max_edge_delay(tg, edge_id);
TATUM_ASSERT_MSG(tcq.value() >= 0., "Clock-to-q delay (Tcq) expected to be positive");
return tcq;
}
}
Time calculate_slack(const Time required_time, const Time arrival_time) {
//Setup requires the arrival to occur *before* the required time, so
//slack is the amount of required time left after the arrival time; meaning
//we subtract the arrival time from the required time to get the setup slack
return required_time - arrival_time;
}
};
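//Illustrative sketch (not part of the library): why capture_clock_edge_delay() above
//returns the setup time negated. Placing -Tsu on the clock capture path makes the sink's
//required time equal to the capture clock arrival minus Tsu, i.e. the data must arrive
//at least Tsu before the capture clock edge. Values below are made up, in plain doubles.
inline double example_setup_required_time() {
    double capture_clock_arrival = 10.0; //Hypothetical capture clock arrival at the CPIN
    double tsu                   =  0.2; //Hypothetical setup time of the capturing flip-flop
    return capture_clock_arrival + (-tsu); //9.8: the DATA_REQUIRED time at the SINK
}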
}} //namespace

View File

@ -0,0 +1,10 @@
#pragma once
/** \file
*
* The available graph visitor (i.e. analysis mode) implementations
*/
#include "SetupAnalysis.hpp"
#include "HoldAnalysis.hpp"
#include "SetupHoldAnalysis.hpp"

View File

@ -0,0 +1,10 @@
#pragma once
/** \file
*
* The available graph walker implementations
*/
#include "graph_walkers/SerialWalker.hpp"
#include "graph_walkers/ParallelLevelizedWalker.hpp"
#include "graph_walkers/ParallelWalker.hpp"

View File

@ -0,0 +1,157 @@
#pragma once
#include "tatum/graph_walkers/TimingGraphWalker.hpp"
#include "tatum/graph_visitors/GraphVisitor.hpp"
#include "tatum/TimingGraph.hpp"
#ifdef TATUM_USE_TBB
# include <tbb/parallel_for_each.h>
# include <tbb/combinable.h>
#endif
namespace tatum {
/**
 * A parallel graph walker which traverses the timing graph in a levelized
 * manner. Nodes within each level are processed in parallel using Intel's
 * Threading Building Blocks (TBB). If TBB is not available it operates serially and is
* equivalent to the SerialWalker.
*/
class ParallelLevelizedWalker : public TimingGraphWalker {
public:
void do_arrival_pre_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, GraphVisitor& visitor) override {
num_unconstrained_startpoints_ = 0;
LevelId first_level = *tg.levels().begin();
auto nodes = tg.level_nodes(first_level);
#if defined(TATUM_USE_TBB)
tbb::combinable<size_t> unconstrained_counter(zero);
tbb::parallel_for_each(nodes.begin(), nodes.end(), [&](auto node) {
bool constrained = visitor.do_arrival_pre_traverse_node(tg, tc, node);
if(!constrained) {
unconstrained_counter.local() += 1;
}
});
num_unconstrained_startpoints_ = unconstrained_counter.combine(std::plus<size_t>());
#else //Serial
for(auto iter = nodes.begin(); iter != nodes.end(); ++iter) {
bool constrained = visitor.do_arrival_pre_traverse_node(tg, tc, *iter);
if(!constrained) {
num_unconstrained_startpoints_ += 1;
}
}
#endif
}
void do_required_pre_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, GraphVisitor& visitor) override {
num_unconstrained_endpoints_ = 0;
const auto& po = tg.logical_outputs();
#if defined(TATUM_USE_TBB)
tbb::combinable<size_t> unconstrained_counter(zero);
tbb::parallel_for_each(po.begin(), po.end(), [&](auto node) {
bool constrained = visitor.do_required_pre_traverse_node(tg, tc, node);
if(!constrained) {
unconstrained_counter.local() += 1;
}
});
num_unconstrained_endpoints_ = unconstrained_counter.combine(std::plus<size_t>());
#else //Serial
for(auto iter = po.begin(); iter != po.end(); ++iter) {
bool constrained = visitor.do_required_pre_traverse_node(tg, tc, *iter);
if(!constrained) {
num_unconstrained_endpoints_ += 1;
}
}
#endif
}
void do_arrival_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, GraphVisitor& visitor) override {
for(LevelId level_id : tg.levels()) {
auto level_nodes = tg.level_nodes(level_id);
#if defined(TATUM_USE_TBB)
tbb::parallel_for_each(level_nodes.begin(), level_nodes.end(), [&](auto node) {
visitor.do_arrival_traverse_node(tg, tc, dc, node);
});
#else //Serial
for(auto iter = level_nodes.begin(); iter != level_nodes.end(); ++iter) {
visitor.do_arrival_traverse_node(tg, tc, dc, *iter);
}
#endif
}
}
void do_required_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, GraphVisitor& visitor) override {
for(LevelId level_id : tg.reversed_levels()) {
auto level_nodes = tg.level_nodes(level_id);
#if defined(TATUM_USE_TBB)
tbb::parallel_for_each(level_nodes.begin(), level_nodes.end(), [&](auto node) {
visitor.do_required_traverse_node(tg, tc, dc, node);
});
#else //Serial
for(auto iter = level_nodes.begin(); iter != level_nodes.end(); ++iter) {
visitor.do_required_traverse_node(tg, tc, dc, *iter);
}
#endif
}
}
void do_update_slack_impl(const TimingGraph& tg, const DelayCalculator& dc, GraphVisitor& visitor) override {
auto nodes = tg.nodes();
#if defined(TATUM_USE_TBB)
tbb::parallel_for_each(nodes.begin(), nodes.end(), [&](auto node) {
visitor.do_slack_traverse_node(tg, dc, node);
});
#else //Serial
for(auto iter = nodes.begin(); iter != nodes.end(); ++iter) {
visitor.do_slack_traverse_node(tg, dc, *iter);
}
#endif
}
void do_reset_impl(const TimingGraph& tg, GraphVisitor& visitor) override {
auto nodes = tg.nodes();
auto edges = tg.edges();
#if defined(TATUM_USE_TBB)
tbb::parallel_for_each(nodes.begin(), nodes.end(), [&](auto node) {
visitor.do_reset_node(node);
});
tbb::parallel_for_each(edges.begin(), edges.end(), [&](auto edge) {
visitor.do_reset_edge(edge);
});
#else //Serial
for(auto node_iter = nodes.begin(); node_iter != nodes.end(); ++node_iter) {
visitor.do_reset_node(*node_iter);
}
for(auto edge_iter = edges.begin(); edge_iter != edges.end(); ++edge_iter) {
visitor.do_reset_edge(*edge_iter);
}
#endif
}
size_t num_unconstrained_startpoints_impl() const override { return num_unconstrained_startpoints_; }
size_t num_unconstrained_endpoints_impl() const override { return num_unconstrained_endpoints_; }
private:
#if defined(TATUM_USE_TBB)
//Function to initialize tbb::combinable<size_t> to zero
// In earlier versions of TBB (e.g. v4.4) an explicit constant could be
// used as the initializer. However later versions (e.g. v2018.0)
// require that the initializer be a (thread-safe) callable.
// We therefore use an explicit function, which should work for all
// versions.
static size_t zero() { return 0; }
#endif
size_t num_unconstrained_startpoints_ = 0;
size_t num_unconstrained_endpoints_ = 0;
};
} //namespace
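//Illustrative sketch (not part of the library): the per-thread counting pattern used in
//the pre-traversals above. Each thread accumulates into its own local() copy of the
//tbb::combinable, and combine() reduces the per-thread values once the parallel loop has
//finished. The input values below are arbitrary; only the pattern matters.
#ifdef TATUM_USE_TBB
#include <vector>
#include <functional>
inline size_t example_count_odd_values(const std::vector<int>& values) {
    tbb::combinable<size_t> odd_counter([]() { return size_t(0); }); //Thread-safe zero initializer
    tbb::parallel_for_each(values.begin(), values.end(), [&](int value) {
        if(value % 2 != 0) {
            odd_counter.local() += 1; //Each thread increments only its own copy
        }
    });
    return odd_counter.combine(std::plus<size_t>()); //Reduce the per-thread counts
}
#endif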

View File

@ -0,0 +1,7 @@
#pragma once
#include "tatum/graph_walkers_fwd.hpp"
//Include the def'n of ParallelLevelizedWalker
#include "ParallelLevelizedWalker.hpp"

View File

@ -0,0 +1,82 @@
#pragma once
#include "tatum/graph_walkers/TimingGraphWalker.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
#include "tatum/graph_visitors/GraphVisitor.hpp"
namespace tatum {
/**
* A simple serial graph walker which traverses the timing graph in a levelized
* manner.
*/
class SerialWalker : public TimingGraphWalker {
protected:
void do_arrival_pre_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, GraphVisitor& visitor) override {
size_t num_unconstrained = 0;
LevelId first_level = *tg.levels().begin();
for(NodeId node_id : tg.level_nodes(first_level)) {
bool constrained = visitor.do_arrival_pre_traverse_node(tg, tc, node_id);
if(!constrained) {
++num_unconstrained;
}
}
num_unconstrained_startpoints_ = num_unconstrained;
}
void do_required_pre_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, GraphVisitor& visitor) override {
size_t num_unconstrained = 0;
for(NodeId node_id : tg.logical_outputs()) {
bool constrained = visitor.do_required_pre_traverse_node(tg, tc, node_id);
if(!constrained) {
++num_unconstrained;
}
}
num_unconstrained_endpoints_ = num_unconstrained;
}
void do_arrival_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, GraphVisitor& visitor) override {
for(LevelId level_id : tg.levels()) {
for(NodeId node_id : tg.level_nodes(level_id)) {
visitor.do_arrival_traverse_node(tg, tc, dc, node_id);
}
}
}
void do_required_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, GraphVisitor& visitor) override {
for(LevelId level_id : tg.reversed_levels()) {
for(NodeId node_id : tg.level_nodes(level_id)) {
visitor.do_required_traverse_node(tg, tc, dc, node_id);
}
}
}
void do_update_slack_impl(const TimingGraph& tg, const DelayCalculator& dc, GraphVisitor& visitor) override {
for(NodeId node : tg.nodes()) {
visitor.do_slack_traverse_node(tg, dc, node);
}
}
void do_reset_impl(const TimingGraph& tg, GraphVisitor& visitor) override {
for(NodeId node_id : tg.nodes()) {
visitor.do_reset_node(node_id);
}
for(EdgeId edge_id : tg.edges()) {
visitor.do_reset_edge(edge_id);
}
}
size_t num_unconstrained_startpoints_impl() const override { return num_unconstrained_startpoints_; }
size_t num_unconstrained_endpoints_impl() const override { return num_unconstrained_endpoints_; }
private:
size_t num_unconstrained_startpoints_ = 0;
size_t num_unconstrained_endpoints_ = 0;
};
} //namespace

View File

@ -0,0 +1,154 @@
#pragma once
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/TimingConstraintsFwd.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
#include "tatum/graph_visitors/GraphVisitor.hpp"
#include <chrono>
#include <map>
#include <string>
#include <limits>
namespace tatum {
/**
* The abstract base class for all TimingGraphWalkers.
*
* TimingGraphWalker encapsulates the process of traversing the timing graph, exposing
* only the do_*_traversal() methods, which can be called by TimingAnalyzers
*
 * Internally the do_*_traversal() methods record performance-related information
* and delegate to concrete sub-classes via the do_*_traversal_impl() virtual methods.
*
* \see GraphVisitor
* \see TimingAnalyzer
*/
class TimingGraphWalker {
public:
virtual ~TimingGraphWalker() = default;
///Performs the arrival time pre-traversal
///\param tg The timing graph
///\param tc The timing constraints
///\param visitor The visitor to apply during the traversal
void do_arrival_pre_traversal(const TimingGraph& tg, const TimingConstraints& tc, GraphVisitor& visitor) {
auto start_time = Clock::now();
do_arrival_pre_traversal_impl(tg, tc, visitor);
profiling_data_["arrival_pre_traversal_sec"] = std::chrono::duration_cast<dsec>(Clock::now() - start_time).count();
}
///Performs the required time pre-traversal
///\param tg The timing graph
///\param tc The timing constraints
///\param visitor The visitor to apply during the traversal
void do_required_pre_traversal(const TimingGraph& tg, const TimingConstraints& tc, GraphVisitor& visitor) {
auto start_time = Clock::now();
do_required_pre_traversal_impl(tg, tc, visitor);
profiling_data_["required_pre_traversal_sec"] = std::chrono::duration_cast<dsec>(Clock::now() - start_time).count();
}
///Performs the arrival time traversal
///\param tg The timing graph
///\param tc The timing constraints
///\param dc The edge delay calculator
///\param visitor The visitor to apply during the traversal
void do_arrival_traversal(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, GraphVisitor& visitor) {
auto start_time = Clock::now();
do_arrival_traversal_impl(tg, tc, dc, visitor);
profiling_data_["arrival_traversal_sec"] = std::chrono::duration_cast<dsec>(Clock::now() - start_time).count();
}
///Performs the required time traversal
///\param tg The timing graph
///\param tc The timing constraints
///\param dc The edge delay calculator
///\param visitor The visitor to apply during the traversal
void do_required_traversal(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, GraphVisitor& visitor) {
auto start_time = Clock::now();
do_required_traversal_impl(tg, tc, dc, visitor);
profiling_data_["required_traversal_sec"] = std::chrono::duration_cast<dsec>(Clock::now() - start_time).count();
}
void do_reset(const TimingGraph& tg, GraphVisitor& visitor) {
auto start_time = Clock::now();
do_reset_impl(tg, visitor);
profiling_data_["reset_sec"] = std::chrono::duration_cast<dsec>(Clock::now() - start_time).count();
}
void do_update_slack(const TimingGraph& tg, const DelayCalculator& dc, GraphVisitor& visitor) {
auto start_time = Clock::now();
do_update_slack_impl(tg, dc, visitor);
profiling_data_["update_slack_sec"] = std::chrono::duration_cast<dsec>(Clock::now() - start_time).count();
}
///Retrieve profiling information
///\param key The profiling key
///\returns The profiling value for the given key, or NaN if the key is not found
double get_profiling_data(std::string key) const {
auto iter = profiling_data_.find(key);
if(iter != profiling_data_.end()) {
return iter->second;
} else {
return std::numeric_limits<double>::quiet_NaN();
}
}
void set_profiling_data(std::string key, double val) {
profiling_data_[key] = val;
}
size_t num_unconstrained_startpoints() const { return num_unconstrained_startpoints_impl(); }
size_t num_unconstrained_endpoints() const { return num_unconstrained_endpoints_impl(); }
protected:
///Sub-class defined arrival time pre-traversal
///\param tg The timing graph
///\param tc The timing constraints
///\param visitor The visitor to apply during the traversal
virtual void do_arrival_pre_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, GraphVisitor& visitor) = 0;
///Sub-class defined required time pre-traversal
///\param tg The timing graph
///\param tc The timing constraints
///\param visitor The visitor to apply during the traversal
virtual void do_required_pre_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, GraphVisitor& visitor) = 0;
///Sub-class defined arrival time traversal
///Performs the arrival time traversal
///\param tg The timing graph
///\param tc The timing constraints
///\param dc The edge delay calculator
///\param visitor The visitor to apply during the traversal
virtual void do_arrival_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, GraphVisitor& visitor) = 0;
///Sub-class defined required time traversal
///Performs the required time traversal
///\param tg The timing graph
///\param tc The timing constraints
///\param dc The edge delay calculator
///\param visitor The visitor to apply during the traversal
virtual void do_required_traversal_impl(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, GraphVisitor& visitor) = 0;
///Sub-class defined reset in preparation for a timing update
virtual void do_reset_impl(const TimingGraph& tg, GraphVisitor& visitor) = 0;
///Sub-class defined slack calculation
virtual void do_update_slack_impl(const TimingGraph& tg, const DelayCalculator& dc, GraphVisitor& visitor) = 0;
virtual size_t num_unconstrained_startpoints_impl() const = 0;
virtual size_t num_unconstrained_endpoints_impl() const = 0;
private:
std::map<std::string, double> profiling_data_;
typedef std::chrono::duration<double> dsec;
typedef std::chrono::high_resolution_clock Clock;
};
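//Illustrative usage sketch (not part of the library): how a concrete walker's public
//interface is typically driven, and how the profiling keys recorded above can be read
//back afterwards. The graph, constraints, delay calculator and visitor objects are
//assumed to be owned by the caller.
inline double example_run_arrival(TimingGraphWalker& walker,
                                  const TimingGraph& tg,
                                  const TimingConstraints& tc,
                                  const DelayCalculator& dc,
                                  GraphVisitor& visitor) {
    walker.do_reset(tg, visitor);                     //Clear any stale tags from a previous update
    walker.do_arrival_pre_traversal(tg, tc, visitor); //Seed arrival times at the startpoints
    walker.do_arrival_traversal(tg, tc, dc, visitor); //Propagate arrival times through the graph
    return walker.get_profiling_data("arrival_traversal_sec"); //Wall-clock time recorded above
}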
} //namespace

View File

@ -0,0 +1,13 @@
#pragma once
namespace tatum {
class SerialWalker;
class ParallelLevelizedWalker;
///The default parallel graph walker
using ParallelWalker = ParallelLevelizedWalker;
} //namespace

View File

@ -0,0 +1,65 @@
#ifndef TATUM_NODE_NUM_NAME_RESOLVER_HPP
#define TATUM_NODE_NUM_NAME_RESOLVER_HPP
#include "tatum/TimingGraphNameResolver.hpp"
namespace tatum {
//A name resolver which just resolves to node IDs and node types
class NodeNumResolver : public TimingGraphNameResolver {
public:
NodeNumResolver(const TimingGraph& tg, const DelayCalculator& dc, bool verbose)
: tg_(tg)
, dc_(dc)
, verbose_(verbose) {}
std::string node_name(NodeId node) const override {
return "Node(" + std::to_string(size_t(node)) + ")";
}
std::string node_type_name(NodeId node) const override {
auto type = tg_.node_type(node);
std::stringstream ss;
ss << type;
return ss.str();
}
EdgeDelayBreakdown edge_delay_breakdown(EdgeId edge, DelayType delay_type) const override {
EdgeDelayBreakdown delay_breakdown;
if (edge && verbose_) {
auto edge_type = tg_.edge_type(edge);
DelayComponent component;
component.inst_name = "Edge(" + std::to_string(size_t(edge)) + ")";
std::stringstream ss;
ss << edge_type;
component.type_name = ss.str();
if (delay_type == DelayType::MAX) {
if (edge_type == EdgeType::PRIMITIVE_CLOCK_CAPTURE) {
component.delay = dc_.setup_time(tg_, edge);
} else {
component.delay = dc_.max_edge_delay(tg_, edge);
}
} else if (delay_type == DelayType::MIN) {
if (edge_type == EdgeType::PRIMITIVE_CLOCK_CAPTURE) {
component.delay = dc_.hold_time(tg_, edge);
} else {
component.delay = dc_.min_edge_delay(tg_, edge);
}
}
delay_breakdown.components.push_back(component);
}
return delay_breakdown;
}
private:
const TimingGraph& tg_;
const DelayCalculator& dc_;
bool verbose_;
};
} //namespace
#endif

View File

@ -0,0 +1,23 @@
#ifndef TATUM_SKEW_PATH_HPP
#define TATUM_SKEW_PATH_HPP
namespace tatum {
struct SkewPath {
DomainId launch_domain;
DomainId capture_domain;
TimingSubPath clock_launch_path;
TimingSubPath clock_capture_path;
NodeId data_launch_node;
NodeId data_capture_node;
Time clock_launch_arrival;
Time clock_capture_arrival;
Time clock_constraint;
Time clock_skew;
};
}
#endif

View File

@ -0,0 +1,160 @@
#ifndef TATUM_TIMING_PATH_HPP
#define TATUM_TIMING_PATH_HPP
#include <vector>
#include "tatum/report/TimingPathFwd.hpp"
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/base/TimingType.hpp"
#include "tatum/Time.hpp"
#include "tatum/tags/TimingTag.hpp"
#include "tatum/util/tatum_assert.hpp"
#include "tatum/util/tatum_range.hpp"
namespace tatum {
//High level information about a timing path
class TimingPathInfo {
public:
TimingPathInfo() = default;
TimingPathInfo(TimingType path_type, Time path_delay, Time path_slack, NodeId launch_n, NodeId capture_n, DomainId launch_d, DomainId capture_d)
: path_type_(path_type)
, delay_(path_delay)
, slack_(path_slack)
, startpoint_(launch_n)
, endpoint_(capture_n)
, launch_domain_(launch_d)
, capture_domain_(capture_d) {}
TimingType type() { return path_type_; }
Time delay() const { return delay_; }
Time slack() const { return slack_; }
NodeId startpoint() const { return startpoint_; } //Note may be NodeId::INVALID() for functions which don't fully trace the timing path
NodeId endpoint() const { return endpoint_; }
DomainId launch_domain() const { return launch_domain_; }
DomainId capture_domain() const { return capture_domain_; }
private:
TimingType path_type_ = TimingType::UNKOWN;
Time delay_;
Time slack_;
//The timing source and sink which launched,
//and captured the data
NodeId startpoint_;
NodeId endpoint_;
//The clock domains
DomainId launch_domain_;
DomainId capture_domain_;
};
//A component/point along a timing path
class TimingPathElem {
public:
TimingPathElem() = default;
TimingPathElem(TimingTag tag_v,
NodeId node_v,
EdgeId incomming_edge_v) noexcept
: tag_(tag_v)
, node_(node_v)
, incomming_edge_(incomming_edge_v) {
//pass
}
public: //Accessors
const TimingTag& tag() const { return tag_; }
NodeId node() const { return node_; }
EdgeId incomming_edge() const { return incomming_edge_; }
public: //Mutators
void set_incomming_edge(EdgeId edge) { incomming_edge_ = edge; }
private:
TimingTag tag_;
NodeId node_;
EdgeId incomming_edge_;
};
//One sub-path of a timing path (e.g. clock launch path, data path, clock capture path)
class TimingSubPath {
public:
typedef std::vector<TimingPathElem>::const_iterator path_elem_iterator;
typedef util::Range<path_elem_iterator> path_elem_range;
public:
TimingSubPath() = default;
TimingSubPath(std::vector<TimingPathElem> elems)
: elements_(elems) {}
path_elem_range elements() const {
return util::make_range(elements_.begin(),
elements_.end());
}
private:
std::vector<TimingPathElem> elements_;
};
//A collection of timing path elements which form
//a timing path.
class TimingPath {
public:
typedef std::vector<TimingPathElem>::const_iterator path_elem_iterator;
typedef util::Range<path_elem_iterator> path_elem_range;
public:
TimingPath() = default;
TimingPath(const TimingPathInfo& info,
TimingSubPath clock_launch,
TimingSubPath data_arrival,
TimingSubPath clock_capture,
const TimingPathElem& data_required_elem,
const TimingTag& slack)
: path_info_(info)
, clock_launch_path_(clock_launch)
, data_arrival_path_(data_arrival)
, clock_capture_path_(clock_capture)
, data_required_element_(data_required_elem)
, slack_tag_(slack) {
//pass
}
public:
const TimingPathInfo& path_info() const { return path_info_; }
const TimingSubPath& clock_launch_path() const {
return clock_launch_path_;
}
const TimingSubPath& data_arrival_path() const {
return data_arrival_path_;
}
const TimingSubPath& clock_capture_path() const {
return clock_capture_path_;
}
TimingPathElem data_required_element() const {
return data_required_element_;
}
const TimingTag& slack_tag() const { return slack_tag_; }
private:
TimingPathInfo path_info_;
TimingSubPath clock_launch_path_;
TimingSubPath data_arrival_path_;
TimingSubPath clock_capture_path_;
TimingPathElem data_required_element_;
TimingTag slack_tag_;
};
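//Illustrative sketch (not part of the library): walking the elements of a reported
//TimingPath. Each TimingPathElem carries the node reached, the timing tag (whose time is
//the accumulated arrival), and the edge used to reach it; the example simply returns the
//arrival time at the final element of the data path (i.e. at the capturing SINK).
inline Time example_data_arrival_at_endpoint(const TimingPath& path) {
    Time arrival;
    for(const TimingPathElem& elem : path.data_arrival_path().elements()) {
        arrival = elem.tag().time(); //Accumulated arrival time up to this element
    }
    return arrival;
}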
} //namespace
#endif

View File

@ -0,0 +1,189 @@
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
#include "tatum/report/TimingPathCollector.hpp"
#include "tatum/report/TimingReportTagRetriever.hpp"
#include "tatum/report/timing_path_tracing.hpp"
#include <algorithm>
#include <map>
namespace tatum {
namespace detail {
std::vector<TimingPath> collect_worst_timing_paths(const TimingGraph& timing_graph, const detail::TagRetriever& tag_retriever, size_t npaths);
std::vector<SkewPath> collect_worst_skew_paths(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints,
const detail::TagRetriever& tag_retriever, TimingType timing_type, size_t npaths);
std::vector<TimingPath> collect_worst_timing_paths(const TimingGraph& timing_graph, const detail::TagRetriever& tag_retriever, size_t npaths) {
std::vector<TimingPath> paths;
struct TagNode {
TagNode(TimingTag t, NodeId n) noexcept
: tag(t), node(n) {}
TimingTag tag;
NodeId node;
};
std::vector<TagNode> tags_and_sinks;
//Add the slacks of all sinks
for(NodeId node : timing_graph.logical_outputs()) {
for(TimingTag tag : tag_retriever.slacks(node)) {
tags_and_sinks.emplace_back(tag,node);
}
}
//Sort in ascending slack order so most negative slacks are first
auto ascending_slack_order = [](const TagNode& lhs, const TagNode& rhs) {
return lhs.tag.time() < rhs.tag.time();
};
std::sort(tags_and_sinks.begin(), tags_and_sinks.end(), ascending_slack_order);
//Trace the paths for each tag/node pair
// The sort above orders the nodes and tags from worst to best slack (i.e. ascending slack order),
// so the first pair is the most critical end-point
for(const auto& tag_node : tags_and_sinks) {
NodeId sink_node = tag_node.node;
TimingTag sink_tag = tag_node.tag;
TimingPath path = detail::trace_path(timing_graph, tag_retriever, sink_tag.launch_clock_domain(), sink_tag.capture_clock_domain(), sink_node);
paths.push_back(path);
if(paths.size() >= npaths) break;
}
return paths;
}
std::vector<SkewPath> collect_worst_skew_paths(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints,
const detail::TagRetriever& tag_retriever, TimingType timing_type, size_t npaths) {
std::vector<SkewPath> paths;
for(NodeId node : timing_graph.nodes()) {
NodeType node_type = timing_graph.node_type(node);
if (node_type != NodeType::SINK) continue;
const auto& required_tags = tag_retriever.tags(node, TagType::DATA_REQUIRED);
for (auto& required_tag : required_tags) {
SkewPath path;
path.launch_domain = required_tag.launch_clock_domain();
path.capture_domain = required_tag.capture_clock_domain();
TimingSubPath data_arrival_path = detail::trace_data_arrival_path(timing_graph, tag_retriever, path.launch_domain, path.capture_domain, node);
TATUM_ASSERT(!data_arrival_path.elements().empty());
auto& data_launch_elem = *data_arrival_path.elements().begin();
//Constant generators do not have skew
if (is_const_gen_tag(data_launch_elem.tag())) continue;
path.data_launch_node = data_launch_elem.node();
path.data_capture_node = node;
path.clock_launch_path = detail::trace_clock_launch_path(timing_graph, tag_retriever, path.launch_domain, path.capture_domain, path.data_launch_node);
path.clock_capture_path = detail::trace_clock_capture_path(timing_graph, tag_retriever, path.launch_domain, path.capture_domain, path.data_capture_node);
if (path.clock_launch_path.elements().empty()) {
//Primary input
path.clock_launch_arrival = data_launch_elem.tag().time();
//Adjust for input delay
if (timing_type == TimingType::SETUP) {
path.clock_launch_arrival -= timing_constraints.input_constraint(path.data_launch_node, path.launch_domain, DelayType::MAX);
} else {
TATUM_ASSERT(timing_type == TimingType::HOLD);
path.clock_launch_arrival -= timing_constraints.input_constraint(path.data_launch_node, path.launch_domain, DelayType::MIN);
}
} else {
//FF source
path.clock_launch_arrival = path_end(path.clock_launch_path);
}
if (path.clock_capture_path.elements().empty()) {
//Primary output
path.clock_capture_arrival = required_tag.time();
//Adjust for output delay and clock uncertainty
if (timing_type == TimingType::SETUP) {
path.clock_capture_arrival += timing_constraints.output_constraint(path.data_capture_node, path.capture_domain, DelayType::MAX);
} else {
TATUM_ASSERT(timing_type == TimingType::HOLD);
path.clock_capture_arrival += timing_constraints.output_constraint(path.data_capture_node, path.capture_domain, DelayType::MIN);
}
//TODO: need to think about why we don't need to adjust for uncertainty on these paths...
} else {
//FF capture
path.clock_capture_arrival = path_end(path.clock_capture_path);
//Adjust for clock uncertainty
if (timing_type == TimingType::SETUP) {
path.clock_capture_arrival -= timing_constraints.setup_clock_uncertainty(path.launch_domain, path.capture_domain);
} else {
TATUM_ASSERT(timing_type == TimingType::HOLD);
path.clock_capture_arrival += timing_constraints.hold_clock_uncertainty(path.launch_domain, path.capture_domain);
}
}
//Record period constraint
if (timing_type == TimingType::SETUP) {
path.clock_constraint = timing_constraints.setup_constraint(path.launch_domain, path.capture_domain);
} else {
TATUM_ASSERT(timing_type == TimingType::HOLD);
path.clock_constraint = timing_constraints.hold_constraint(path.launch_domain, path.capture_domain);
}
path.clock_skew = path.clock_capture_arrival - path.clock_launch_arrival - path.clock_constraint;
paths.push_back(path);
}
}
auto skew_order = [&](const SkewPath& lhs, const SkewPath& rhs) {
if (timing_type == TimingType::SETUP) {
//Positive skew helps setup paths (since the capture clock edge is delayed,
//lengthening the clock period), so show the most negative skews first.
return lhs.clock_skew < rhs.clock_skew;
} else {
//Positive skew hurts hold paths (since the capture clock edge is delayed,
//giving the data more time to catch up to the capture clock),
//so show the most positive skews first.
TATUM_ASSERT(timing_type == TimingType::HOLD);
return lhs.clock_skew > rhs.clock_skew;
}
};
std::sort(paths.begin(), paths.end(), skew_order);
//TODO: not very efficient, since we generate all paths first and then trim to npaths...
paths.resize(std::min(paths.size(), npaths));
return paths;
}
} //namespace detail
std::vector<TimingPath> TimingPathCollector::collect_worst_setup_timing_paths(const TimingGraph& timing_graph, const tatum::SetupTimingAnalyzer& setup_analyzer, size_t npaths) const {
detail::SetupTagRetriever tag_retriever(setup_analyzer);
return collect_worst_timing_paths(timing_graph, tag_retriever, npaths);
}
std::vector<TimingPath> TimingPathCollector::collect_worst_hold_timing_paths(const TimingGraph& timing_graph, const tatum::HoldTimingAnalyzer& hold_analyzer, size_t npaths) const {
detail::HoldTagRetriever tag_retriever(hold_analyzer);
return collect_worst_timing_paths(timing_graph, tag_retriever, npaths);
}
std::vector<SkewPath> TimingPathCollector::collect_worst_setup_skew_paths(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints, const tatum::SetupTimingAnalyzer& setup_analyzer, size_t npaths) const {
detail::SetupTagRetriever tag_retriever(setup_analyzer);
return collect_worst_skew_paths(timing_graph, timing_constraints, tag_retriever, TimingType::SETUP, npaths);
}
std::vector<SkewPath> TimingPathCollector::collect_worst_hold_skew_paths(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints, const tatum::HoldTimingAnalyzer& hold_analyzer, size_t npaths) const {
detail::HoldTagRetriever tag_retriever(hold_analyzer);
return collect_worst_skew_paths(timing_graph, timing_constraints, tag_retriever, TimingType::HOLD, npaths);
}
} //namespace tatum
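//Illustrative sketch (not part of the library): the clock skew arithmetic used above, with
//made-up values and plain doubles. Skew is how much later the capture clock arrives than
//the launch clock, after discounting the period constraint between the two domains.
inline double example_clock_skew() {
    double clock_capture_arrival = 12.3; //Hypothetical capture clock arrival at the sink FF
    double clock_launch_arrival  =  1.8; //Hypothetical launch clock arrival at the source FF
    double clock_constraint      = 10.0; //Hypothetical setup constraint between the domains
    return clock_capture_arrival - clock_launch_arrival - clock_constraint; //0.5 of positive skew
}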

View File

@ -0,0 +1,20 @@
#ifndef TATUM_TIMING_PATH_COLLECTOR_HPP
#define TATUM_TIMING_PATH_COLLECTOR_HPP
#include "tatum/timing_analyzers_fwd.hpp"
#include "TimingPath.hpp"
#include "SkewPath.hpp"
namespace tatum {
class TimingPathCollector {
public:
std::vector<TimingPath> collect_worst_setup_timing_paths(const TimingGraph& timing_graph, const tatum::SetupTimingAnalyzer& setup_analyzer, size_t npaths) const;
std::vector<TimingPath> collect_worst_hold_timing_paths(const TimingGraph& timing_graph, const tatum::HoldTimingAnalyzer& hold_analyzer, size_t npaths) const;
std::vector<SkewPath> collect_worst_setup_skew_paths(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints, const tatum::SetupTimingAnalyzer& setup_analyzer, size_t npaths) const;
std::vector<SkewPath> collect_worst_hold_skew_paths(const TimingGraph& timing_graph, const TimingConstraints& timing_constraints, const tatum::HoldTimingAnalyzer& hold_analyzer, size_t npaths) const;
};
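//Illustrative usage sketch (not part of the library): collecting the worst setup paths
//once an analysis has been run. The timing graph and analyzer are assumed to already
//exist elsewhere; npaths limits how many end-points are traced.
inline std::vector<TimingPath> example_worst_setup_paths(const TimingGraph& timing_graph,
                                                         const SetupTimingAnalyzer& setup_analyzer,
                                                         size_t npaths) {
    TimingPathCollector collector;
    return collector.collect_worst_setup_timing_paths(timing_graph, setup_analyzer, npaths);
}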
} //namespace
#endif

View File

@ -0,0 +1,12 @@
#ifndef TATUM_TIMING_PATH_FWD_HPP
#define TATUM_TIMING_PATH_FWD_HPP
namespace tatum {
class TimingPathInfo;
class TimingPathElem;
class TimingPath;
} //namespace
#endif

View File

@ -0,0 +1,70 @@
#ifndef TATUM_TIMING_REPORT_TAG_RETRIEVER_HPP
#define TATUM_TIMING_REPORT_TAG_RETRIEVER_HPP
#include "tatum/timing_analyzers.hpp"
#include "tatum/report/TimingPathFwd.hpp"
#include "tatum/base/TimingType.hpp"
namespace tatum { namespace detail {
//An abstract interface for retrieving tag information
//
//This is useful for reporting routines which can work for either setup or hold
class TagRetriever {
public:
virtual ~TagRetriever() = default;
virtual TimingTags::tag_range tags(NodeId node) const = 0;
virtual TimingTags::tag_range tags(NodeId node, TagType tag_type) const = 0;
virtual TimingTags::tag_range slacks(NodeId node) const = 0;
virtual TimingType type() const = 0;
};
class SetupTagRetriever : public TagRetriever {
public:
SetupTagRetriever(const SetupTimingAnalyzer& analyzer): analyzer_(analyzer) {}
TimingTags::tag_range tags(NodeId node) const override {
return analyzer_.setup_tags(node);
}
TimingTags::tag_range tags(NodeId node, TagType tag_type) const override {
return analyzer_.setup_tags(node, tag_type);
}
TimingTags::tag_range slacks(NodeId node) const override {
return analyzer_.setup_slacks(node);
}
TimingType type() const override {
return TimingType::SETUP;
}
private:
const SetupTimingAnalyzer& analyzer_;
};
class HoldTagRetriever : public TagRetriever {
public:
HoldTagRetriever(const HoldTimingAnalyzer& analyzer): analyzer_(analyzer) {}
TimingTags::tag_range tags(NodeId node) const override {
return analyzer_.hold_tags(node);
}
TimingTags::tag_range tags(NodeId node, TagType tag_type) const override {
return analyzer_.hold_tags(node, tag_type);
}
TimingTags::tag_range slacks(NodeId node) const override {
return analyzer_.hold_slacks(node);
}
TimingType type() const override {
return TimingType::HOLD;
}
private:
const HoldTimingAnalyzer& analyzer_;
};
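//Illustrative sketch (not part of the library): how the TagRetriever abstraction lets a
//single reporting helper serve both setup and hold analyzers. The helper below counts the
//DATA_REQUIRED tags on a node, regardless of which analyzer produced them.
inline size_t example_num_required_tags(const TagRetriever& tag_retriever, const NodeId node) {
    size_t num_tags = 0;
    for(const TimingTag& tag : tag_retriever.tags(node, TagType::DATA_REQUIRED)) {
        static_cast<void>(tag); //Only the count is of interest here
        ++num_tags;
    }
    return num_tags;
}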
}} //namespace
#endif

View File

@ -0,0 +1,234 @@
#include "graphviz_dot_writer.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/base/sta_util.hpp"
#include <iostream>
#include <sstream>
#include <iterator>
#include <string>
namespace tatum {
constexpr size_t MAX_DOT_GRAPH_NODES = 1000;
GraphvizDotWriter make_graphviz_dot_writer(const TimingGraph& tg, const DelayCalculator& delay_calc) {
return GraphvizDotWriter(tg, delay_calc);
}
GraphvizDotWriter::GraphvizDotWriter(const TimingGraph& tg, const DelayCalculator& delay_calc)
: tg_(tg)
, delay_calc_(delay_calc) {
//By default dump all nodes
auto nodes = tg_.nodes();
nodes_to_dump_ = std::set<NodeId>(nodes.begin(), nodes.end());
}
void GraphvizDotWriter::write_dot_file(std::string filename) {
std::ofstream os(filename);
write_dot_file(os);
}
void GraphvizDotWriter::write_dot_file(std::ostream& os) {
std::map<NodeId,std::vector<TimingTag>> node_tags;
for(NodeId node : nodes_to_dump_) {
node_tags[node] = std::vector<TimingTag>(); //No tags
}
std::map<NodeId,std::vector<TimingTag>> node_slacks;
for(NodeId node : nodes_to_dump_) {
node_slacks[node] = std::vector<TimingTag>(); //No slacks
}
TimingType timing_type = TimingType::UNKOWN;
write_dot_format(os, node_tags, node_slacks, timing_type);
}
void GraphvizDotWriter::write_dot_file(std::string filename, const SetupTimingAnalyzer& analyzer) {
std::ofstream os(filename);
write_dot_file(os, analyzer);
}
void GraphvizDotWriter::write_dot_file(std::string filename, const HoldTimingAnalyzer& analyzer) {
std::ofstream os(filename);
write_dot_file(os, analyzer);
}
void GraphvizDotWriter::write_dot_file(std::ostream& os, const SetupTimingAnalyzer& analyzer) {
std::map<NodeId,std::vector<TimingTag>> node_tags;
std::map<NodeId,std::vector<TimingTag>> node_slacks;
TimingType timing_type = TimingType::SETUP;
for(NodeId node : nodes_to_dump_) {
auto tags = analyzer.setup_tags(node);
std::copy(tags.begin(), tags.end(), std::back_inserter(node_tags[node]));
auto slacks = analyzer.setup_slacks(node);
std::copy(slacks.begin(), slacks.end(), std::back_inserter(node_slacks[node]));
}
write_dot_format(os, node_tags, node_slacks, timing_type);
}
void GraphvizDotWriter::write_dot_file(std::ostream& os, const HoldTimingAnalyzer& analyzer) {
std::map<NodeId,std::vector<TimingTag>> node_tags;
std::map<NodeId,std::vector<TimingTag>> node_slacks;
TimingType timing_type = TimingType::HOLD;
for(NodeId node : nodes_to_dump_) {
auto tags = analyzer.hold_tags(node);
std::copy(tags.begin(), tags.end(), std::back_inserter(node_tags[node]));
auto slacks = analyzer.hold_slacks(node);
std::copy(slacks.begin(), slacks.end(), std::back_inserter(node_slacks[node]));
}
write_dot_format(os, node_tags, node_slacks, timing_type);
}
void GraphvizDotWriter::write_dot_format(std::ostream& os,
const std::map<NodeId,std::vector<TimingTag>>& node_tags,
const std::map<NodeId,std::vector<TimingTag>>& node_slacks,
TimingType timing_type) {
os << "digraph G {" << std::endl;
os << "\tnode[shape=record]" << std::endl;
for(const NodeId node: nodes_to_dump_) {
auto tag_iter = node_tags.find(node);
TATUM_ASSERT(tag_iter != node_tags.end());
auto slack_iter = node_slacks.find(node);
TATUM_ASSERT(slack_iter != node_slacks.end());
write_dot_node(os, node, tag_iter->second, slack_iter->second);
}
for(const LevelId level : tg_.levels()) {
write_dot_level(os, level);
}
for(const EdgeId edge : tg_.edges()) {
write_dot_edge(os, edge, timing_type);
}
os << "}" << std::endl;
}
void GraphvizDotWriter::write_dot_node(std::ostream& os,
const NodeId node,
const std::vector<TimingTag>& tags,
const std::vector<TimingTag>& slacks) {
os << "\t";
os << node_name(node);
os << "[label=\"";
os << "{" << node << " (" << tg_.node_type(node) << ")";
for(const auto& tag_set : {tags, slacks}) {
for(const auto& tag : tag_set) {
os << " | {";
os << tag.type() << "\\n";
tag_domain_from_to(os, tag);
if(tag.origin_node()) {
if(tag.type() == TagType::CLOCK_LAUNCH || tag.type() == TagType::CLOCK_CAPTURE || tag.type() == TagType::DATA_ARRIVAL) {
os << " from ";
} else {
os << " for ";
}
os << tag.origin_node();
} else {
os << " [Origin] ";
}
os << "\\n";
os << "time: " << tag.time().value();
os << "}";
}
}
os << "}\"]";
os << std::endl;
}
void GraphvizDotWriter::write_dot_level(std::ostream& os, const LevelId level) {
os << "\t{rank = same; ";
for(const NodeId node : tg_.level_nodes(level)) {
if(nodes_to_dump_.count(node)) {
os << node_name(node) <<"; ";
}
}
os << "}" << std::endl;
}
void GraphvizDotWriter::write_dot_edge(std::ostream& os, const EdgeId edge, const TimingType timing_type) {
NodeId src_node = tg_.edge_src_node(edge);
NodeId sink_node = tg_.edge_sink_node(edge);
if(nodes_to_dump_.count(src_node) && nodes_to_dump_.count(sink_node)) {
//Only draw edges to nodes in the set of nodes being printed
EdgeType edge_type = tg_.edge_type(edge);
std::string color = "";
os << "\t" << node_name(src_node) << " -> " << node_name(sink_node);
os << " [ label=\"" << edge;
if(edge_type == EdgeType::PRIMITIVE_CLOCK_CAPTURE) {
color = CLOCK_CAPTURE_EDGE_COLOR;
if (timing_type == TimingType::SETUP) {
os << "\\n"<< -delay_calc_.setup_time(tg_, edge) << " (-tsu)";
} else if (timing_type == TimingType::HOLD) {
os << "\\n"<< delay_calc_.hold_time(tg_, edge) << " (thld)";
} else {
TATUM_ASSERT(timing_type == TimingType::UNKOWN);
//Show both the setup and hold delays if the analysis type is unknown
os << "\\n"<< -delay_calc_.setup_time(tg_, edge) << " (-tsu)";
os << "\\n"<< delay_calc_.hold_time(tg_, edge) << " (thld)";
}
} else if(edge_type == EdgeType::PRIMITIVE_CLOCK_LAUNCH) {
color = CLOCK_LAUNCH_EDGE_COLOR;
os << "\\n" << delay_calc_.max_edge_delay(tg_, edge) << " (tcq)";
} else {
//Combinational edge
if (timing_type == TimingType::SETUP) {
os << "\\n" << delay_calc_.max_edge_delay(tg_, edge);
} else if (timing_type == TimingType::HOLD) {
os << "\\n" << delay_calc_.min_edge_delay(tg_, edge);
} else {
TATUM_ASSERT(timing_type == TimingType::UNKOWN);
os << "\\n" << delay_calc_.max_edge_delay(tg_, edge) << " (tmax)";
os << "\\n" << delay_calc_.min_edge_delay(tg_, edge) << " (tmin)";
}
}
if(tg_.edge_disabled(edge)) {
os << "\\n" << "(disabled)";
}
os << "\""; //end label
if(tg_.edge_disabled(edge)) {
os << " style=\"dashed\"";
os << " color=\"" << DISABLED_EDGE_COLOR << "\"";
os << " fontcolor=\"" << DISABLED_EDGE_COLOR << "\"";
} else if (!color.empty()) {
os << " color=\"" + color + "\"";
}
os << "]";
os << ";" <<std::endl;
}
}
void GraphvizDotWriter::tag_domain_from_to(std::ostream& os, const TimingTag& tag) {
if(!tag.launch_clock_domain()) {
os << "*";
} else {
os << tag.launch_clock_domain();
}
os << " to ";
if(!tag.capture_clock_domain()) {
os << "*";
} else {
os << tag.capture_clock_domain();
}
}
std::string GraphvizDotWriter::node_name(const NodeId node) {
return "node" + std::to_string(size_t(node));
}
} //namespace

View File

@ -0,0 +1,82 @@
#ifndef GRAPHVIZ_DOT_WRITER_HPP
#define GRAPHVIZ_DOT_WRITER_HPP
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/timing_analyzers.hpp"
#include "tatum/delay_calc/DelayCalculator.hpp"
#include "tatum/report/TimingPathFwd.hpp"
#include "tatum/base/TimingType.hpp"
#include <vector>
#include <fstream>
#include <map>
#include <set>
namespace tatum {
/*
* These routines dump out the timing graph into a graphviz file (in DOT format)
*
* This has proven invaluable for debugging. DOT files can be viewed interactively
* using the 'xdot' tool.
*
 * If the delay calculator is provided, edge delays are included in the output
* If the analyzer is provided, the calculated timing tags are included in the output
*
* If a specific set of nodes is provided, these will be the nodes included in the output
 * (if no nodes are provided, which is the default, the whole graph will be included in the output,
* unless it is too large to be practical).
*/
class GraphvizDotWriter {
public:
GraphvizDotWriter(const TimingGraph& tg, const DelayCalculator& delay_calc);
//Specify a subset of nodes to dump
template<class Container>
void set_nodes_to_dump(const Container& nodes) {
nodes_to_dump_ = std::set<NodeId>(nodes.begin(), nodes.end());
}
//Write the dot file with no timing tags
void write_dot_file(std::string filename);
void write_dot_file(std::ostream& os);
//Write the dot file with timing tags
void write_dot_file(std::string filename, const SetupTimingAnalyzer& analyzer);
void write_dot_file(std::ostream& os, const SetupTimingAnalyzer& analyzer);
void write_dot_file(std::string filename, const HoldTimingAnalyzer& analyzer);
void write_dot_file(std::ostream& os, const HoldTimingAnalyzer& analyzer);
private:
void write_dot_format(std::ostream& os,
const std::map<NodeId,std::vector<TimingTag>>& node_tags,
const std::map<NodeId,std::vector<TimingTag>>& node_slacks,
const TimingType timing_type);
void write_dot_node(std::ostream& os,
const NodeId node,
const std::vector<TimingTag>& tags,
const std::vector<TimingTag>& slacks);
void write_dot_level(std::ostream& os,
const LevelId level);
void write_dot_edge(std::ostream& os,
const EdgeId edge,
const TimingType timing_type);
void tag_domain_from_to(std::ostream& os, const TimingTag& tag);
std::string node_name(NodeId node);
private:
constexpr static size_t MAX_DOT_GRAPH_NODES = 1000; //Graphviz can't handle large numbers of nodes
constexpr static const char* CLOCK_CAPTURE_EDGE_COLOR = "#c45403"; //Orange-red
constexpr static const char* CLOCK_LAUNCH_EDGE_COLOR = "#10c403"; //Green
constexpr static const char* DISABLED_EDGE_COLOR = "#aaaaaa"; //Grey
const TimingGraph& tg_;
std::set<NodeId> nodes_to_dump_;
const DelayCalculator& delay_calc_;
};
GraphvizDotWriter make_graphviz_dot_writer(const TimingGraph& tg, const DelayCalculator& delay_calc);
} //namespace
#endif

View File

@ -0,0 +1,293 @@
#include "tatum/report/timing_path_tracing.hpp"
#include "tatum/report/TimingPath.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/tags/TimingTags.hpp"
namespace tatum { namespace detail {
NodeId find_startpoint(const TimingSubPath& path);
NodeId find_endpoint(const TimingSubPath& path);
TimingPath trace_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId data_capture_node) {
TATUM_ASSERT(timing_graph.node_type(data_capture_node) == NodeType::SINK);
//Record the slack at the sink node
TimingTag slack_tag;
auto slack_tags = tag_retriever.slacks(data_capture_node);
auto iter = find_tag(slack_tags, launch_domain, capture_domain);
TATUM_ASSERT(iter != slack_tags.end());
slack_tag = *iter;
TimingSubPath data_arrival_path = trace_data_arrival_path(timing_graph,
tag_retriever,
launch_domain,
capture_domain,
data_capture_node);
TATUM_ASSERT(!data_arrival_path.elements().empty());
NodeId data_launch_node = data_arrival_path.elements().begin()->node();
TimingSubPath clock_launch_path = trace_clock_launch_path(timing_graph,
tag_retriever,
launch_domain,
capture_domain,
data_launch_node);
TimingSubPath clock_capture_path = trace_clock_capture_path(timing_graph,
tag_retriever,
launch_domain,
capture_domain,
data_capture_node);
//Record the required time
auto required_tags = tag_retriever.tags(data_capture_node, TagType::DATA_REQUIRED);
auto req_iter = find_tag(required_tags, launch_domain, capture_domain);
TATUM_ASSERT(req_iter != required_tags.end());
TimingPathElem data_required_element = TimingPathElem(*req_iter, data_capture_node, EdgeId::INVALID());
EdgeId clock_capture_edge = timing_graph.node_clock_capture_edge(data_capture_node);
if(clock_capture_edge) {
//Mark the edge between clock and data paths (i.e. setup/hold edge)
data_required_element.set_incomming_edge(clock_capture_edge);
}
TimingPathInfo path_info(tag_retriever.type(),
calc_path_delay(data_arrival_path),
slack_tag.time(),
find_startpoint(data_arrival_path),
find_endpoint(data_arrival_path),
launch_domain,
capture_domain);
TimingPath path(path_info,
clock_launch_path,
data_arrival_path,
clock_capture_path,
data_required_element,
slack_tag);
return path;
}
TimingSubPath trace_clock_launch_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId data_launch_node) {
TATUM_ASSERT(timing_graph.node_type(data_launch_node) == NodeType::SOURCE);
/*
* Backtrace the launch clock path
*/
std::vector<TimingPathElem> clock_launch_elements;
EdgeId clock_launch_edge = timing_graph.node_clock_launch_edge(data_launch_node);
if(clock_launch_edge) {
//Through the clock network
NodeId curr_node = timing_graph.edge_src_node(clock_launch_edge);
TATUM_ASSERT(timing_graph.node_type(curr_node) == NodeType::CPIN);
while(curr_node) {
auto launch_tags = tag_retriever.tags(curr_node, TagType::CLOCK_LAUNCH);
auto iter = find_tag(launch_tags, launch_domain, capture_domain);
if(iter == launch_tags.end()) {
//Then look for incompletely specified (i.e. wildcard) capture clocks
iter = find_tag(launch_tags, launch_domain, DomainId::INVALID());
}
TATUM_ASSERT(iter != launch_tags.end());
EdgeId edge;
if(iter->origin_node()) {
edge = timing_graph.find_edge(iter->origin_node(), curr_node);
TATUM_ASSERT(edge);
}
//Record
clock_launch_elements.emplace_back(*iter, curr_node, edge);
//Advance to the previous node
curr_node = iter->origin_node();
}
}
//Reverse backtrace
std::reverse(clock_launch_elements.begin(), clock_launch_elements.end());
return TimingSubPath(clock_launch_elements);
}
TimingSubPath trace_data_arrival_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId data_capture_node) {
TATUM_ASSERT(timing_graph.node_type(data_capture_node) == NodeType::SINK);
/*
* Backtrace the data launch path
*/
std::vector<TimingPathElem> data_arrival_elements;
NodeId curr_node = data_capture_node;
while(curr_node && timing_graph.node_type(curr_node) != NodeType::CPIN) {
//Trace until we hit the origin, or a clock pin
auto data_tags = tag_retriever.tags(curr_node, TagType::DATA_ARRIVAL);
//First try to find the exact tag match
auto iter = find_tag(data_tags, launch_domain, capture_domain);
if(iter == data_tags.end()) {
//Then look for incompletely specified (i.e. wildcard) capture clocks
iter = find_tag(data_tags, launch_domain, DomainId::INVALID());
//Look for a constant generator
if (iter == data_tags.end()) {
iter = find_tag(data_tags, DomainId::INVALID(), DomainId::INVALID());
TATUM_ASSERT(iter != data_tags.end());
TATUM_ASSERT(is_const_gen_tag(*iter));
}
}
TATUM_ASSERT(iter != data_tags.end());
EdgeId edge;
if(iter->origin_node()) {
edge = timing_graph.find_edge(iter->origin_node(), curr_node);
TATUM_ASSERT(edge);
}
//Record
data_arrival_elements.emplace_back(*iter, curr_node, edge);
//Advance to the previous node
curr_node = iter->origin_node();
}
EdgeId clock_launch_edge = timing_graph.node_clock_launch_edge(data_arrival_elements.back().node());
if(clock_launch_edge) {
//Mark the edge between clock and data paths (i.e. setup/hold edge)
data_arrival_elements.back().set_incomming_edge(clock_launch_edge);
}
//Since we backtraced from sink we reverse to get the forward order
std::reverse(data_arrival_elements.begin(), data_arrival_elements.end());
return TimingSubPath(data_arrival_elements);
}
TimingSubPath trace_clock_capture_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId data_capture_node) {
TATUM_ASSERT(timing_graph.node_type(data_capture_node) == NodeType::SINK);
/*
* Backtrace the clock capture path
*/
std::vector<TimingPathElem> clock_capture_elements;
EdgeId clock_capture_edge = timing_graph.node_clock_capture_edge(data_capture_node);
if(clock_capture_edge) {
NodeId curr_node = timing_graph.edge_src_node(clock_capture_edge);
TATUM_ASSERT(timing_graph.node_type(curr_node) == NodeType::CPIN);
while(curr_node) {
//Record the clock capture tag
auto capture_tags = tag_retriever.tags(curr_node, TagType::CLOCK_CAPTURE);
auto iter = find_tag(capture_tags, launch_domain, capture_domain);
TATUM_ASSERT(iter != capture_tags.end());
EdgeId edge;
if(iter->origin_node()) {
edge = timing_graph.find_edge(iter->origin_node(), curr_node);
TATUM_ASSERT(edge);
}
clock_capture_elements.emplace_back(*iter, curr_node, edge);
//Advance to the previous node
curr_node = iter->origin_node();
}
}
//Reverse backtrace
std::reverse(clock_capture_elements.begin(), clock_capture_elements.end());
return TimingSubPath(clock_capture_elements);
}
TimingSubPath trace_skew_clock_launch_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId data_launch_node) {
TimingSubPath subpath = trace_clock_launch_path(timing_graph, tag_retriever, launch_domain, capture_domain, data_launch_node);
//A primary input may have no actual clock path, since the data arrival time is marked directly
if (subpath.elements().empty()) {
std::vector<TimingPathElem> elements;
auto data_arrival_tags = tag_retriever.tags(data_launch_node, TagType::DATA_ARRIVAL);
auto iter = find_tag(data_arrival_tags, launch_domain, DomainId::INVALID());
TATUM_ASSERT(iter != data_arrival_tags.end());
elements.emplace_back(*iter, data_launch_node, EdgeId::INVALID());
subpath = TimingSubPath(elements);
}
return subpath;
}
TimingSubPath trace_skew_clock_capture_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId data_capture_node) {
TimingSubPath subpath = trace_clock_capture_path(timing_graph, tag_retriever, launch_domain, capture_domain, data_capture_node);
//A primary output may have no actual clock path, since the data required time is marked directly
if (subpath.elements().empty()) {
std::vector<TimingPathElem> elements;
auto data_arrival_tags = tag_retriever.tags(data_capture_node, TagType::DATA_REQUIRED);
auto iter = find_tag(data_arrival_tags, launch_domain, capture_domain);
TATUM_ASSERT(iter != data_arrival_tags.end());
elements.emplace_back(*iter, data_capture_node, EdgeId::INVALID());
subpath = TimingSubPath(elements);
}
return subpath;
}
Time calc_path_delay(const TimingSubPath& path) {
if (path.elements().size() > 0) {
TimingTag first_arrival = path.elements().begin()->tag();
TimingTag last_arrival = (--path.elements().end())->tag();
return last_arrival.time() - first_arrival.time();
} else {
return Time(0.);
}
}
Time path_end(const TimingSubPath& path) {
if (path.elements().size() > 0) {
TimingTag last_arrival = (--path.elements().end())->tag();
return last_arrival.time();
} else {
return Time(0.);
}
}
NodeId find_startpoint(const TimingSubPath& path) {
TATUM_ASSERT(path.elements().size() > 0);
return path.elements().begin()->node();
}
NodeId find_endpoint(const TimingSubPath& path) {
TATUM_ASSERT(path.elements().size() > 0);
return (--path.elements().end())->node();
}
}} //namespace

View File

@ -0,0 +1,48 @@
#ifndef TATUM_TIMING_PATH_TRACING_HPP
#define TATUM_TIMING_PATH_TRACING_HPP
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/report/TimingReportTagRetriever.hpp"
#include "TimingPath.hpp"
namespace tatum { namespace detail {
TimingPath trace_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId sink_node);
TimingSubPath trace_clock_launch_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId sink_node);
TimingSubPath trace_data_arrival_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId sink_node);
TimingSubPath trace_clock_capture_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId sink_node);
TimingSubPath trace_skew_clock_launch_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId data_launch_node);
TimingSubPath trace_skew_clock_capture_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId data_capture_node);
Time calc_path_delay(const TimingSubPath& path);
Time path_end(const TimingSubPath& path);
}} //namespace
#endif

View File

@ -0,0 +1,214 @@
#ifndef TATUM_TIMING_ANALYZERS_HPP
#define TATUM_TIMING_ANALYZERS_HPP
#include "timing_analyzers_fwd.hpp"
/** \file
* Timing Analysis: Overview
* ===========================
* Timing analysis involves determining at what point in time (relative to some reference,
* usually a clock) signals arrive at every point in a circuit. This is used to verify
* that none of these signals will violate any constraints, ensuring the circuit will operate
* correctly and reliably.
*
* The circuit is typically modelled as a directed graph (called a timing graph) where nodes
* represent the 'pins' of elements in the circuit and edges the connectivity between them.
* Delays through the circuit (e.g. along wires, or through combinational logic) are associated
* with the edges.
*
* We are generally interested in whether signals arrive late (causing a setup
 * constraint violation) or arrive too early (causing a hold violation). Typically long/slow
* paths cause setup violations, and short/fast paths hold violations. Violating a setup or
 * hold constraint can cause meta-stability in Flip-Flops, putting the circuit into an
* undetermined state for an indeterminate period of time (this is not good).
*
 * Performing a completely accurate, non-pessimistic timing analysis would involve determining
 * exactly which set of states/inputs to the circuit triggers the worst case path. This requires
 * a dynamic analysis of the circuit's behaviour and is prohibitively expensive to compute in
* practice.
*
*
* Static Timing Analysis (STA)
* ------------------------------
* Static Timing Analysis (STA), which this library implements, simplifies the problem somewhat
 * by ignoring the dynamic behaviour of the circuit. That is, we assume all paths could be sensitized,
 * even if they may be extremely rare or impossible to sensitize in practice. This
 * makes the result of our analysis pessimistic, but also makes the problem tractable.
*
* There are two approaches to performing STA: 'path-based' and 'block-based'.
*
* Under Path-Based Analysis (PBA) all paths in the circuit are analyzed. This provides
* a more accurate (less-pessimistic) analysis than block-based approaches but can require
* an exponential amount of time. In particular, circuit structures with re-convergent fanout
 * can exponentially increase the number of paths through the circuit which must be evaluated.
*
 * To avoid this unpleasant behaviour, we implement 'block-based' STA. Under this formulation,
* only the worst case values are kept at each node in the circuit. While this is more
* pessimistic (any path passing through a node is now viewed as having the worst case delay
* of any path through that node), it greatly reduces the computational complexity, allowing
 * STA to be performed in linear time.
*
* Arrival Time, Required Time & Slack
* ---------------------------------------
* When a Timing Analyzer performs timing analysis it is primarily calculating the following:
*
* - Arrival Time: The time a signal actually arrived at a particular point in the circuit.
*
 * - Required Time: The time a signal should have arrived (was required) at a particular point
 *                  in the circuit to avoid violating a timing constraint.
*
* - Slack: The difference between required and arrival times. This indicates how close
* a particular path/node is to violating its timing constraint. A positive
* value indicates there is no violation, a negative value indicates there is
* a violation. The magnitude of the slack indicates by how much the constraint
* is passing/failing. A value of zero indicates that the constraint is met
* exactly.
* TODO: Implement slack calculator
*
*
* Calculating Arrival & Required Times
* --------------------------------------
* It is also useful to define the following collections of timing graph nodes:
* - Primary Inputs (PIs): circuit external input pins, Flip-Flop Q pins
* - Primary Outputs (POs): circuit external output pins, Flip-Flop D pins
* Note that in the timing graph PIs have no fan-in, and POs have no fan-out.
*
* The arrival and required times are calculated at every node in the timing graph by walking
* the timing graph in either a 'forward' or 'backward' direction. The following provide a
* high-level description of the process.
*
* On the initial (forward) traversal, the graph is walked from PIs to POs to calculate arrival
* time, performing the following:
* 1) Initialize the arrival time of all PIs based on constraints (typically zero in the
* simplest case)
 *      2) Add the delay of each edge to the arrival time of the edge's driving node
 *         to calculate the edge arrival time.
* 3) At each downstream node calculate the max (setup) or min (hold) of all input
* edge arrival times, and store it as the node arrival time.
* 4) Repeat (2)-(3) until all nodes have valid arrival times.
*
* On the second (backward) traversal, the graph is walked in reverse from POs to PIs, performing
* the following:
* 1) Initialize the required times of all POs based on constraints (typically target
* clock period for setup analysis)
 *      2) Subtract the delay of each edge from the required time of the edge's sink node
 *         to calculate the edge required time.
 *      3) At each upstream node calculate the min (setup) or max (hold) of all outgoing
* edge required times, and store it as the node required time.
* 4) Repeat (2)-(3) until all nodes have valid required times.
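 *
 * As a rough sketch (pseudocode only; the names below are illustrative rather than part of
 * this library's API), the forward (arrival time) traversal of a setup analysis reduces to:
 *
 *      for each node n in topological order (PIs first):
 *          for each in-edge e = (m -> n):
 *              arrival[n] = max(arrival[n], arrival[m] + delay(e))
 *
 * The backward (required time) traversal is the mirror image: nodes are visited in reverse
 * topological order, taking the min over each node's out-edges of required[sink] - delay(e).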
*
* Clock Skew
* ------------
* In a real system the clocks which launch signals at the PIs and capture them at POs may not
 * all arrive at the same instant in time. This difference is known as 'skew'.
*
 * Skew can be modelled by adjusting the initialized PI arrival times to reflect when the clock
 * signal actually reaches the node. Similarly, the PO required times can also be adjusted.
*
* Multi-clock Analysis
* ----------------------
* The previous discussion has focused primarily on single-clock STA. In a multi-clock analysis
* transfers between clock domains need to be handled (e.g. if the launch and capture clocks are
* different). This is typically handled by identifying the worst-case alignment between all pairs
* of clocks. This worst case value then becomes the constraint between the two clocks.
*
* To perform a multi-clock analysis the paths between different clocks need to be considered with
 * the identified constraint applied. This can be handled in one of two ways:
* a) Performing multiple single-clock analysis passes (one for each pair of clocks), or
* b) Perform a single analysis but track multiple arrival/required times (a unique one for
* each clock).
*
* Approach (b) turns out to be more efficient in practice, since it only requires a single traversal
* of the timing graph. The combined values {clock, arrival time, required time} (and potentially other
* info) are typically combined into a single 'Tag'. As a result there may be multiple tags stored
* at each node in the timing graph.
*/
/* XXX TODO: these features haven't yet been implemented!
* ======================================================
*
 * Derating & Pessimism Reduction Techniques
* ------------------------------------------
*
* Unlike the previous discussion (which assumed constant delay values), in reality circuit delays
 * vary based on a variety of different parameters. To generate a correct (i.e. pessimistic and not
 * optimistic) analysis this means we must often choose a highly pessimistic value for this single
 * delay. Recovering some of this pessimism requires modelling more details of the system.
*
* Slew & Rise/Fall
* ------------------
* Two of the key parameters that the single delay model does not account for are the impact of:
 * - A signal's 'slew rate' (e.g. signal transition time from low to high), which can affect
* the delay through a logic gate
* - Signal direction (i.e. is a logic gate's output rising or falling). In CMOS
 * technologies transistors of different types are activated depending on the
* direction of the output signal (e.g. NMOS pull-down vs PMOS pull-up)
*
* Derating
* ----------
 * Another challenge in modern CMOS technologies is the presence of On-Chip Variation (OCV):
 * identically designed structures (e.g. transistors, wires) may have different performance
* due to manufacturing variation. To capture this behaviour one approach is to apply a
* derate to delays on different paths in the circuit.
*
 * The typical approach applies incrementally more advanced derating, focusing first
 * on the most significant sources of variation. A typical progression is to apply derating to:
* 1) Clock Path
* Since clocks often span large portions of the chip they can be subject to large
 * variation. To ensure a pessimistic analysis a late (early) derate is applied to
 * clock launch paths, and an early (late) derate to clock capture paths for a setup
* (hold) analysis.
*
* 2) Data Path
* Early (late) derates can also be applied to data paths to account for their variation
* during setup (hold) analysis.
 * Note that data paths tend to be more localized than clock networks, so the impact of
* OCV tends to be smaller.
*
 * The simplest form of derating is a fixed derate applied to every delay; this can be extended
* to different delays for different types of circuit elements (wires vs cells, individual cells
* etc.).
*
 * It turns out that fixed derates are overly pessimistic, since it is unlikely (particularly on
* long paths) that random variation will always go in one direction. Improved forms of derating
* take into account the length (depth) of a path, often using a (user specified) table of derates
* which falls off (derates less) along deeper paths. It is also possible for the derate to take
* into account the physical locality/spread of a path.
*
* Common Clock Pessimism Removal (CCPR)
* ---------------------------------------
* Note: Names for this vary, including: Clock Reconvergence Pessimism Removal (CRPR),
* Common Path Pessimism Removal (CPPR), and likely others.
*
* Derating can also introduce pessimism when applied to clock networks if the launch and
* capture paths share some common portion of the clock network. Specifically, using
* early/late derates on the launch/capture paths of a clock network may model a scenario
* which is physically impossible to occur: the shared portion of the clock path cannot
* be both early and late at the same time.
*
* CCPR does not directly remove this effect, but instead calculates a 'credit' which is
 * added back to the final path to counteract this extra pessimism.
*/
/*
* IMPLEMENTATION NOTES
* ====================
*
* All the timing analyzers included here are pure abstract classes.
* They should all have pure virtual functions and store NO data members.
*
* We use multiple (virtual) inheritance to define the SetupHoldTimingAnalyzer
* class, allowing it to be cleanly substituted for any SetupTimingAnalyzer
* or HoldTimingAnalyzer.
*
 * Note also that we are using the NVI (Non-Virtual Interface) pattern, so
* any public member functions should delegate their work to an appropriate
* protected virtual member function.
*/
#include "analyzers/TimingAnalyzer.hpp"
#include "analyzers/SetupTimingAnalyzer.hpp"
#include "analyzers/HoldTimingAnalyzer.hpp"
#include "analyzers/SetupHoldTimingAnalyzer.hpp"
#endif

View File

@ -0,0 +1,13 @@
#ifndef TATUM_TIMING_ANALYZERS_FWD_HPP
#define TATUM_TIMING_ANALYZERS_FWD_HPP
namespace tatum {
class TimingAnalyzer;
class SetupTimingAnalyzer;
class HoldTimingAnalyzer;
class SetupHoldTimingAnalyzer;
} //namespace
#endif

View File

@ -0,0 +1,108 @@
#include "tatum/timing_paths.hpp"
#include "tatum/TimingGraph.hpp"
#include "tatum/TimingConstraints.hpp"
#include "tatum/report/TimingReportTagRetriever.hpp"
#include "tatum/report/timing_path_tracing.hpp"
#include "tatum/error.hpp"
namespace tatum {
//Generic path tracer for setup or hold
TimingPath trace_path(const TimingGraph& timing_graph,
const detail::TagRetriever& tag_retriever,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId sink_node);
std::vector<TimingPathInfo> find_critical_paths(const TimingGraph& timing_graph,
const TimingConstraints& timing_constraints,
const SetupTimingAnalyzer& setup_analyzer) {
std::vector<TimingPathInfo> cpds;
//We calculate the critical path delay (CPD) for each pair of clock domains (which are connected to each other)
//
//Intuitively, CPD is the smallest period (maximum frequency) we can run the launch clock at while not violating
//the constraint.
//
//We calculate CPD as:
//
    // CPD = constraint - slack
//
    //If slack < 0, this will lengthen the constraint to its feasible value (i.e. the CPD)
    //If slack > 0, this will tighten the constraint to its feasible value (i.e. the CPD)
//
//Using the slack to calculate CPD implicitly accounts for i/o delays, clock uncertainty, clock latency etc,
//as they are already included in the slack.
//To ensure we find the critical path delay, we look at all timing endpoints (i.e. logical_outputs()) and keep
//the largest for each domain pair
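    //
    //As a worked example (illustrative numbers only): with a 10ns setup constraint, a slack of
    //-2ns gives CPD = 10 - (-2) = 12ns (the launch clock must be slowed down), while a slack of
    //+3ns gives CPD = 10 - 3 = 7ns (the launch clock could run faster and still meet timing).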
for(NodeId node : timing_graph.logical_outputs()) {
//Look at each data arrival
for(TimingTag slack_tag : setup_analyzer.setup_slacks(node)) {
Time slack = slack_tag.time();
if(!slack.valid()) {
throw Error("slack is not valid", node);
}
Time constraint = Time(timing_constraints.setup_constraint(slack_tag.launch_clock_domain(), slack_tag.capture_clock_domain()));
if(!constraint.valid()) {
throw Error("constraint is not valid", node);
}
Time cpd = Time(constraint) - slack;
if(!cpd.valid()) {
throw Error("cpd is not valid", node);
}
//Record the path info
TimingPathInfo path(TimingType::SETUP,
cpd, slack,
NodeId::INVALID(), //We currently don't trace the path back to the start point, so just mark as invalid
node,
slack_tag.launch_clock_domain(), slack_tag.capture_clock_domain());
//Find any existing path for this domain pair
auto cmp = [&path](const TimingPathInfo& elem) {
return elem.launch_domain() == path.launch_domain()
&& elem.capture_domain() == path.capture_domain();
};
auto iter = std::find_if(cpds.begin(), cpds.end(), cmp);
if(iter == cpds.end()) {
//New domain pair
cpds.push_back(path);
} else if(iter->delay() < path.delay()) {
//New max CPD
*iter = path;
}
}
}
return cpds;
}
TimingPath trace_setup_path(const TimingGraph& timing_graph,
const SetupTimingAnalyzer& setup_analyzer,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId sink_node) {
detail::SetupTagRetriever tag_retriever(setup_analyzer);
return detail::trace_path(timing_graph, tag_retriever, launch_domain, capture_domain, sink_node);
}
TimingPath trace_hold_path(const TimingGraph& timing_graph,
const HoldTimingAnalyzer& hold_analyzer,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId sink_node) {
detail::HoldTagRetriever tag_retriever(hold_analyzer);
return detail::trace_path(timing_graph, tag_retriever, launch_domain, capture_domain, sink_node);
}
} //namespace

View File

@ -0,0 +1,29 @@
#ifndef TATUM_TIMING_PATHS_HPP
#define TATUM_TIMING_PATHS_HPP
#include <vector>
#include "tatum/timing_analyzers.hpp"
#include "tatum/TimingConstraintsFwd.hpp"
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/report/TimingPath.hpp"
namespace tatum {
std::vector<TimingPathInfo> find_critical_paths(const TimingGraph& timing_graph,
const TimingConstraints& timing_constraints,
const SetupTimingAnalyzer& setup_analyzer);
TimingPath trace_setup_path(const TimingGraph& timing_graph,
const SetupTimingAnalyzer& setup_analyzer,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId sink_node);
TimingPath trace_hold_path(const TimingGraph& timing_graph,
const HoldTimingAnalyzer& hold_analyzer,
const DomainId launch_domain,
const DomainId capture_domain,
const NodeId sink_node);
} //namespace
#endif

View File

@ -0,0 +1,42 @@
#ifndef TATUM_OS_FORMAT_GUARD_HPP
#define TATUM_OS_FORMAT_GUARD_HPP
#include <ostream>
namespace tatum {
//A RAII guard class to ensure restoration of output stream format
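//
//Example usage (a sketch only; 'value' is an illustrative variable, and <iomanip> would be
//needed for std::setprecision):
//
//  {
//      OsFormatGuard guard(std::cout);                          //Capture the current format state
//      std::cout << std::hex << std::setprecision(3) << value;  //Temporarily change the formatting
//  }                                                            //Format state restored when 'guard' is destroyed
//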
class OsFormatGuard {
public:
explicit OsFormatGuard(std::ostream& os)
: os_(os)
, flags_(os_.flags()) //Save formatting flag state
, width_(os_.width())
, precision_(os.precision())
, fill_(os.fill())
{}
~OsFormatGuard() {
os_.flags(flags_); //Restore
os_.width(width_);
os_.precision(precision_);
os_.fill(fill_);
}
OsFormatGuard(const OsFormatGuard&) = delete;
OsFormatGuard& operator=(const OsFormatGuard&) = delete;
OsFormatGuard(const OsFormatGuard&&) = delete;
OsFormatGuard& operator=(const OsFormatGuard&&) = delete;
private:
std::ostream& os_;
std::ios::fmtflags flags_;
std::streamsize width_;
std::streamsize precision_;
char fill_;
};
} //namespace
#endif

View File

@ -0,0 +1,112 @@
#ifndef TATUM_ASSERT_H
#define TATUM_ASSERT_H
#include <cstdio> //fprintf, stderr
#include <cstdlib> //abort
/*
* The header defines useful assertion macros for TATUM projects.
*
* Three types of assertions are defined:
* TATUM_ASSERT_OPT - low overhead assertions that should always be enabled
* TATUM_ASSERT - medium overhead assertions that may be enabled
* TATUM_ASSERT_SAFE - high overhead assertions typically enabled only for debugging
 * Each of the above assertions also has a *_MSG variant (e.g. TATUM_ASSERT_MSG(expr, msg))
* which takes an additional argument specifying additional message text to be shown when
* the assertion fails.
*
* The macro TATUM_ASSERT_LEVEL specifies the level of assertion checking desired:
*
* TATUM_ASSERT_LEVEL == 3: TATUM_ASSERT_OPT, TATUM_ASSERT, TATUM_ASSERT_SAFE enabled
* TATUM_ASSERT_LEVEL == 2: TATUM_ASSERT_OPT, TATUM_ASSERT enabled
* TATUM_ASSERT_LEVEL == 1: TATUM_ASSERT_OPT enabled
* TATUM_ASSERT_LEVEL == 0: No assertion checking enabled
 * Note that assertion levels beyond 3 are currently treated the same as level 3
*/
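/*
 * Example usage (a sketch only; the checked expressions are illustrative):
 *
 *      TATUM_ASSERT(node_id);                                    //Medium-cost check, on by default
 *      TATUM_ASSERT_MSG(fanout > 0, "Node must drive an edge");  //Check with an explanatory message
 *      TATUM_ASSERT_SAFE(graph_is_acyclic(tg));                  //Expensive check, typically debug-only
 *
 * The checking level can be changed by defining TATUM_ASSERT_LEVEL before this header is
 * included (e.g. by passing -DTATUM_ASSERT_LEVEL=3 to the compiler).
 */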
//Set a default assertion level if none is specified
#ifndef TATUM_ASSERT_LEVEL
# define TATUM_ASSERT_LEVEL 2
#endif
//Enable the assertions based on the specified level
#if TATUM_ASSERT_LEVEL >= 3
# define TATUM_ASSERT_SAFE_ENABLED
#endif
#if TATUM_ASSERT_LEVEL >= 2
# define TATUM_ASSERT_ENABLED
#endif
#if TATUM_ASSERT_LEVEL >= 1
# define TATUM_ASSERT_OPT_ENABLED
#endif
//Define the user assertion macros
#ifdef TATUM_ASSERT_SAFE_ENABLED
# define TATUM_ASSERT_SAFE(expr) TATUM_ASSERT_IMPL(expr, nullptr)
# define TATUM_ASSERT_SAFE_MSG(expr, msg) TATUM_ASSERT_IMPL(expr, msg)
#else
# define TATUM_ASSERT_SAFE(expr) static_cast<void>(0)
# define TATUM_ASSERT_SAFE_MSG(expr, msg) static_cast<void>(0)
#endif
#ifdef TATUM_ASSERT_ENABLED
# define TATUM_ASSERT(expr) TATUM_ASSERT_IMPL(expr, nullptr)
# define TATUM_ASSERT_MSG(expr, msg) TATUM_ASSERT_IMPL(expr, msg)
#else
# define TATUM_ASSERT(expr) static_cast<void>(0)
# define TATUM_ASSERT_MSG(expr, msg) static_cast<void>(0)
#endif
#ifdef TATUM_ASSERT_OPT_ENABLED
# define TATUM_ASSERT_OPT(expr) TATUM_ASSERT_IMPL(expr, nullptr)
# define TATUM_ASSERT_OPT_MSG(expr, msg) TATUM_ASSERT_IMPL(expr, msg)
#else
# define TATUM_ASSERT_OPT(expr) static_cast<void>(0)
# define TATUM_ASSERT_OPT_MSG(expr, msg) static_cast<void>(0)
#endif
//Define the assertion implementation macro
// We wrap the check in a do {} while() to ensure the function-like
// macro can always be followed by a ';'
#define TATUM_ASSERT_IMPL(expr, msg) do {\
if(!(expr)) { \
tatum::util::Assert::handle_assert(#expr, __FILE__, __LINE__, TATUM_ASSERT_FUNCTION, msg); \
} \
} while(false)
//Figure out what macro to use to get the name of the current function
// We default to __func__ which is defined in C99
//
// g++ > 2.6 defines __PRETTY_FUNCTION__ which includes class/namespace/overload
// information, so we prefer to use it if possible
#define TATUM_ASSERT_FUNCTION __func__
#ifdef __GNUC__
# ifdef __GNUC_MINOR__
# if __GNUC__ >= 2 && __GNUC_MINOR__ > 6
# undef TATUM_ASSERT_FUNCTION
# define TATUM_ASSERT_FUNCTION __PRETTY_FUNCTION__
# endif
# endif
#endif
namespace tatum { namespace util {
class Assert {
public:
static void handle_assert(const char* expr, const char* file, unsigned int line, const char* function, const char* msg) {
fprintf(stderr, "%s:%d", file, line);
if(function) {
fprintf(stderr, " %s:", function);
}
fprintf(stderr, " Assertion '%s' failed", expr);
if(msg) {
fprintf(stderr, " (%s)", msg);
}
fprintf(stderr, ".\n");
std::abort();
}
};
}} //namespace
#endif //TATUM_ASSERT_H

View File

@ -0,0 +1,141 @@
#ifndef TATUM_LINEAR_MAP
#define TATUM_LINEAR_MAP
#include <vector>
#include <utility> //for std::move / std::forward
#include "tatum_assert.hpp"
namespace tatum { namespace util {
//A vector-like container which is indexed by K (instead of size_t as in std::vector).
//Requires that K be convertible to size_t with the size_t operator (i.e. size_t()), and
//that the conversion results in a linearly increasing index into the underlying vector.
//
//This results in a container that is similar to a std::map (i.e. converts from one type to
//another), but requires contiguously ascending (i.e. linear) keys. Unlike std::map only the
//values are stored (at the specified index/key), reducing memory usage and improving cache
//locality. Furthermore, find() returns an iterator to the value directly, rather than a std::pair,
//and insert() takes both the key and value as separate arguments and has no return value.
//
//Note that it is possible to use linear_map with sparse/non-contiguous keys, but this is typically
//memory inefficient as the underlying vector will allocate space for [0..size_t(max_key)],
//where max_key is the largest key that has been inserted.
//
//As with a std::vector, it is the caller's responsibility to ensure there is sufficient space
//for a given index/key before it is accessed. The exception to this are the find() and insert()
//methods which handle non-existing keys gracefully.
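//
//Example usage (a sketch only; 'BlkId' is an illustrative StrongId-style key type):
//
//  linear_map<BlkId,std::string> blk_names;
//  blk_names.insert(BlkId(0), "alu");    //insert() grows the underlying vector as needed
//  blk_names.insert(BlkId(1), "adder");
//
//  auto iter = blk_names.find(BlkId(1));
//  if(iter != blk_names.end()) {
//      //*iter is "adder"
//  }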
template<typename K, typename V>
class linear_map {
public: //Public types
typedef typename std::vector<V>::const_reference const_reference;
typedef typename std::vector<V>::reference reference;
typedef typename std::vector<V>::iterator iterator;
typedef typename std::vector<V>::const_iterator const_iterator;
typedef typename std::vector<V>::const_reverse_iterator const_reverse_iterator;
public: //Constructor
//Standard constructors
linear_map() = default;
linear_map(const linear_map&) = default;
linear_map(linear_map&&) = default;
        linear_map& operator=(const linear_map&) = default;
linear_map& operator=(linear_map&&) = default;
//Vector-like constructors
explicit linear_map(size_t n) : vec_(n) {}
explicit linear_map(size_t n, V init_val) : vec_(n, init_val) {}
        explicit linear_map(std::vector<V>&& values) : vec_(std::move(values)) {} //Move, rather than copy, the values
/*
*template<typename... Args>
*linear_map(Args&&... args)
* : vec_(std::forward<Args>(args)...)
*{ }
*/
public: //Accessors
//Iterators
const_iterator begin() const { return vec_.begin(); }
const_iterator end() const { return vec_.end(); }
const_reverse_iterator rbegin() const { return vec_.rbegin(); }
const_reverse_iterator rend() const { return vec_.rend(); }
//Indexing
const_reference operator[] (const K n) const {
TATUM_ASSERT_SAFE_MSG(size_t(n) < vec_.size(), "Out-of-range index");
return vec_[size_t(n)];
}
const_iterator find(const K key) const {
if(size_t(key) < vec_.size()) {
return vec_.begin() + size_t(key);
} else {
return vec_.end();
}
}
std::size_t size() const { return vec_.size(); }
bool empty() const { return vec_.empty(); }
bool contain(const K key) const { return size_t(key) < vec_.size(); }
public: //Mutators
//Delegate potentially overloaded functions to the underlying vector with perfect
//forwarding
template<typename... Args>
void push_back(Args&&... args) { vec_.push_back(std::forward<Args>(args)...); }
template<typename... Args>
void emplace_back(Args&&... args) { vec_.emplace_back(std::forward<Args>(args)...); }
template<typename... Args>
void resize(Args&&... args) { vec_.resize(std::forward<Args>(args)...); }
void clear() { vec_.clear(); }
size_t capacity() const { return vec_.capacity(); }
void shrink_to_fit() { vec_.shrink_to_fit(); }
//Iterators
iterator begin() { return vec_.begin(); }
iterator end() { return vec_.end(); }
//Indexing
reference operator[] (const K n) {
TATUM_ASSERT_SAFE_MSG(size_t(n) < vec_.size(), "Out-of-range index");
return vec_[size_t(n)];
}
iterator find(const K key) {
if(size_t(key) < vec_.size()) {
return vec_.begin() + size_t(key);
} else {
return vec_.end();
}
}
void insert(const K key, const V value) {
if(size_t(key) >= vec_.size()) {
//Resize so key is in range
vec_.resize(size_t(key) + 1);
}
//Insert the value
operator[](key) = value;
}
//Swap (this enables std::swap via ADL)
friend void swap(linear_map<K,V>& x, linear_map<K,V>& y) {
std::swap(x.vec_, y.vec_);
}
private:
std::vector<V> vec_;
};
}} //namespace
#endif

View File

@ -0,0 +1,32 @@
#ifndef TATUM_MATH_HPP
#define TATUM_MATH_HPP
#include <cmath>
namespace tatum { namespace util {
inline float absolute_error(float a, float b) {
return std::fabs(a - b);
}
inline float relative_error(float a, float b) {
if (a == b) {
return 0.;
}
if (std::fabs(b) > std::fabs(a)) {
return std::fabs((a - b) / b);
} else {
return std::fabs((a - b) / a);
}
}
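//Returns true if lhs and rhs match to within either the absolute or the relative error tolerance.
//For example (illustrative values): nearly_equal(1.0000001f, 1.f, 1e-9f, 1e-5f) is true, since
//although the absolute error (~1e-7) exceeds 1e-9, the relative error (~1e-7) is within 1e-5.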
inline bool nearly_equal(const float lhs, const float rhs, const float abs_err_tol, const float rel_err_tol) {
float abs_err = absolute_error(lhs, rhs);
float rel_err = relative_error(lhs, rhs);
return (abs_err <= abs_err_tol) || (rel_err <= rel_err_tol);
}
}} //namespace
#endif

View File

@ -0,0 +1,69 @@
#ifndef TATUM_RANGE_H
#define TATUM_RANGE_H
#include <iterator>
namespace tatum { namespace util {
/*
* The tatum::Range template models a range defined by two iterators of type T.
*
* It allows conveniently returning a range from a single function call
 * without having to explicitly expose the underlying container, or make two
 * explicit calls to retrieve the associated begin and end iterators.
* It also enables the easy use of range-based-for loops.
*
* For example:
*
 *    class MyData {
* public:
 *        typedef std::vector<int>::const_iterator my_iter;
* tatum::Range<my_iter> data();
* ...
* private:
* std::vector<int> data_;
* };
*
* ...
*
 *    MyData my_data;
*
* //fill my_data
*
* for(int val : my_data.data()) {
* //work with values stored in my_data
* }
*
* The empty() and size() methods are convenience wrappers around the relevant
* iterator comparisons.
*
* Note that size() is only constant time if T is a random-access iterator!
*/
template<typename T>
class Range {
public:
typedef T iterator;
public:
Range(T b, T e): begin_(b), end_(e) {}
T begin() const { return begin_; }
T end() const { return end_; }
bool empty() const { return begin_ == end_; }
size_t size() const { return std::distance(begin_, end_); }
private:
T begin_;
T end_;
};
/*
* Creates a tatum::Range from a pair of iterators.
*
* Unlike using the tatum::Range() constructor (which requires specifying
 * the template type T), tatum::make_range() infers T from the arguments.
*
* Example usage:
* auto my_range = tatum::make_range(my_vec.begin(), my_vec.end());
*/
template<typename T>
Range<T> make_range(T b, T e) { return Range<T>(b, e); }
}} //namespace
#endif

View File

@ -0,0 +1,233 @@
#ifndef TATUM_STRONG_ID_H
#define TATUM_STRONG_ID_H
/*
* This header provides the StrongId class, a template which can be used to
* create strong Id's which avoid accidental type conversions (generating
* compiler errors when they occur).
*
* Motivation
* ==========
* It is common to use an Id (typically an integer) to identify and represent a component.
* A basic example (poor style):
*
* size_t count_net_terminals(int net_id);
*
* Where a plain int is used to represent the net identifier.
* Using a plain basic type is poor style since it makes it unclear that the parameter is
* an Id.
*
 * A better example is to use a typedef:
*
* typedef int NetId;
*
 *      size_t count_net_terminals(NetId net_id);
*
* It is now clear that the parameter is expecting an Id.
*
 * However this approach has some limitations. In particular, typedefs only create type
* aliases, and still allow conversions. This is problematic if there are multiple types
* of Ids. For example:
*
* typedef int NetId;
* typedef int BlkId;
*
 *      size_t count_net_terminals(NetId net_id);
*
* BlkId blk_id = 10;
* NetId net_id = 42;
*
 *      count_net_terminals(net_id); //OK
 *      count_net_terminals(blk_id); //Bug: passed a BlkId as a NetId
*
 * Since typedefs are aliases the compiler issues no errors or warnings, and silently passes
* the BlkId where a NetId is expected. This results in hard to diagnose bugs.
*
* We can avoid this issue by using a StrongId:
*
* struct net_id_tag; //Phantom tag for NetId
* struct blk_id_tag; //Phantom tag for BlkId
*
* typedef StrongId<net_id_tag> NetId;
* typedef StrongId<blk_id_tag> BlkId;
*
 *      size_t count_net_terminals(NetId net_id);
*
* BlkId blk_id = 10;
* NetId net_id = 42;
*
 *      count_net_terminals(net_id); //OK
 *      count_net_terminals(blk_id); //Compiler Error: NetId expected!
*
* StrongId is a template which implements the basic features of an Id, but disallows silent conversions
 * between different types of Ids. It uses another 'tag' type (passed as the first template parameter)
* to uniquely identify the type of the Id (preventing conversions between different types of Ids).
*
* Usage
* =====
*
 * The StrongId template class takes one required and two optional template parameters:
*
* 1) Tag - the unique type used to identify this type of Ids [Required]
* 2) T - the underlying integral id type (default: int) [Optional]
* 3) T sentinel - a value representing an invalid Id (default: -1) [Optional]
*
 * If no value is supplied during construction the StrongId is initialized to the invalid/sentinel value.
*
* Example 1: default definition
*
* struct net_id_tag;
* typedef StrongId<net_id_tag> NetId; //Internally stores an integer Id, -1 represents invalid
*
* Example 2: definition with custom underlying type
*
* struct blk_id_tag;
 *      typedef StrongId<blk_id_tag,size_t> BlkId; //Internally stores a size_t Id, -1 represents invalid
*
* Example 3: definition with custom underlying type and custom sentinel value
*
* struct pin_id_tag;
 *      typedef StrongId<pin_id_tag,size_t,0> PinId; //Internally stores a size_t Id, 0 represents invalid
*
* Example 4: Creating Ids
*
* struct net_id_tag;
* typedef StrongId<net_id_tag> MyId; //Internally stores an integer Id, -1 represents invalid
*
* MyId my_id; //Defaults to the sentinel value (-1 by default)
 *      MyId my_other_id(5);  //Explicit construction
 *      MyId my_third_id(25); //Explicit construction
*
* Example 5: Comparing Ids
*
* struct net_id_tag;
* typedef StrongId<net_id_tag> MyId; //Internally stores an integer Id, -1 represents invalid
*
* MyId my_id; //Defaults to the sentinel value (-1 by default)
 *      MyId my_id_one(1);
 *      MyId my_id_two(2);
 *      MyId my_id_also_one(1);
*
* my_id_one == my_id_also_one; //True
* my_id_one == my_id; //False
* my_id_one == my_id_two; //False
* my_id_one != my_id_two; //True
*
* Example 5: Checking for invalid Ids
*
* struct net_id_tag;
* typedef StrongId<net_id_tag> MyId; //Internally stores an integer Id, -1 represents invalid
*
* MyId my_id; //Defaults to the sentinel value
 *      MyId my_id_one(1);
*
* //Comparison against a constructed invalid id
* my_id == MyId::INVALID(); //True
* my_id_one == MyId::INVALID(); //False
* my_id_one != MyId::INVALID(); //True
*
* //The Id can also be evaluated in a boolean context against the sentinel value
* if(my_id) //False, my_id is invalid
 *      if(!my_id)     //True, my_id is invalid
 *      if(my_id_one)  //True, my_id_one is valid
*
* Example 6: Indexing data structures
*
* struct my_id_tag;
 *      typedef StrongId<my_id_tag> MyId; //Internally stores an integer Id, -1 represents invalid
*
* std::vector<int> my_vec = {0, 1, 2, 3, 4, 5};
*
 *      MyId my_id(2);
*
* my_vec[size_t(my_id)]; //Access the third element via explicit conversion
*/
#include <type_traits> //for std::is_integral
#include <functional> //for std::hash
#include <cstddef> //for std::size_t
namespace tatum { namespace util {
//Forward declare the class (needed for operator declarations)
template<typename tag, typename T, T sentinel>
class StrongId;
//Forward declare the equality/inequality operators
// We need to do this before the class definition so the class can
// friend them
template<typename tag, typename T, T sentinel>
bool operator==(const StrongId<tag,T,sentinel>& lhs, const StrongId<tag,T,sentinel>& rhs);
template<typename tag, typename T, T sentinel>
bool operator!=(const StrongId<tag,T,sentinel>& lhs, const StrongId<tag,T,sentinel>& rhs);
template<typename tag, typename T, T sentinel>
bool operator<(const StrongId<tag,T,sentinel>& lhs, const StrongId<tag,T,sentinel>& rhs);
//Class template definition with default template parameters
template<typename tag, typename T=int, T sentinel=T(-1)>
class StrongId {
static_assert(std::is_integral<T>::value, "T must be integral");
public:
//Gets the invalid Id
static constexpr StrongId INVALID() { return StrongId(); }
//Default to the sentinel value
constexpr StrongId() : id_(sentinel) {}
        //Only allow explicit construction from a raw Id (no automatic conversions)
explicit StrongId(T id) noexcept : id_(id) {}
//Allow some explicit conversion to useful types
//Allow explicit conversion to bool (e.g. if(id))
explicit operator bool() const { return *this != INVALID(); }
//Allow explicit conversion to size_t (e.g. my_vector[size_t(strong_id)])
explicit operator std::size_t() const { return static_cast<std::size_t>(id_); }
        //To enable hashing Ids
friend std::hash<StrongId<tag,T,sentinel>>;
        //To enable comparisons between Ids
// Note that since these are templated functions we provide an empty set of template parameters
// after the function name (i.e. <>)
friend bool operator== <>(const StrongId<tag,T,sentinel>& lhs, const StrongId<tag,T,sentinel>& rhs);
friend bool operator!= <>(const StrongId<tag,T,sentinel>& lhs, const StrongId<tag,T,sentinel>& rhs);
friend bool operator< <>(const StrongId<tag,T,sentinel>& lhs, const StrongId<tag,T,sentinel>& rhs);
private:
T id_;
};
template<typename tag, typename T, T sentinel>
bool operator==(const StrongId<tag,T,sentinel>& lhs, const StrongId<tag,T,sentinel>& rhs) {
return lhs.id_ == rhs.id_;
}
template<typename tag, typename T, T sentinel>
bool operator!=(const StrongId<tag,T,sentinel>& lhs, const StrongId<tag,T,sentinel>& rhs) {
return !(lhs == rhs);
}
//Needed for std::map-like containers
template<typename tag, typename T, T sentinel>
bool operator<(const StrongId<tag,T,sentinel>& lhs, const StrongId<tag,T,sentinel>& rhs) {
return lhs.id_ < rhs.id_;
}
}} //namespace tatum::util
//Specialize std::hash for StrongId's (needed for std::unordered_map-like containers)
namespace std {
template<typename tag, typename T, T sentinel>
struct hash<tatum::util::StrongId<tag,T,sentinel>> {
std::size_t operator()(const tatum::util::StrongId<tag,T,sentinel> k) const noexcept {
return std::hash<T>()(k.id_); //Hash with the underlying type
}
};
} //namespace std
#endif

View File

@ -0,0 +1,16 @@
#ifndef TATUM_FWD_HPP
#define TATUM_FWD_HPP
//Data structures
#include "tatum/TimingGraphFwd.hpp"
#include "tatum/TimingConstraintsFwd.hpp"
//Analyzers
#include "tatum/timing_analyzers_fwd.hpp"
#include "tatum/graph_walkers_fwd.hpp"
#include "tatum/analyzer_factory_fwd.hpp"
//Reporting
#include "tatum/TimingReporterFwd.hpp"
#endif

View File

@ -0,0 +1,56 @@
cmake_minimum_required(VERSION 2.8.12)
project("libtatumparse")
#Flex and Bison are used to generate the parser
find_package(BISON REQUIRED 3.0)
find_package(FLEX REQUIRED)
file(GLOB_RECURSE LIB_SOURCES */tatumparse*.cpp)
file(GLOB_RECURSE LIB_HEADERS tatumparse.hpp tatumparse/*.hpp)
#Include directories
set(LIB_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR})
#Find the flex and bison input files
file(GLOB LEXER_SOURCES */tatumparse.l)
file(GLOB PARSER_SOURCES */tatumparse.y)
#Make the flex and bison targets
flex_target(TatumLexer ${LEXER_SOURCES} ${CMAKE_CURRENT_BINARY_DIR}/tatumparse_lexer.gen.cpp
COMPILE_FLAGS --header-file=${CMAKE_CURRENT_BINARY_DIR}/tatumparse_lexer.gen.h)
bison_target(TatumParser ${PARSER_SOURCES} ${CMAKE_CURRENT_BINARY_DIR}/tatumparse_parser.gen.cpp)
add_flex_bison_dependency(TatumLexer TatumParser)
#Apply suppression flags to the relevant files, must come after flex/bison
#targets for output vars to be defined
set_source_files_properties(${FLEX_TatumLexer_OUTPUTS}
                            ${BISON_TatumParser_OUTPUTS}
PROPERTIES COMPILE_FLAGS "${FLEX_BISON_WARN_SUPPRESS_FLAGS}")
#Treat .c as CXX
set_source_files_properties(${LIB_SOURCES} ${FLEX_TatumLexer_OUTPUTS} ${BISON_TatumParser_OUTPUT_SOURCE} PROPERTIES LANGUAGE CXX)
#Suppress attribute warnings from bison header
include(CheckCXXCompilerFlag)
CHECK_CXX_COMPILER_FLAG("-Wno-attributes" COMPILER_SUPPORTS_-Wno-attributes)
if(COMPILER_SUPPORTS_-Wno-attributes)
add_compile_options("-Wno-attributes")
endif()
#Suppress warnings in Flex/Bison generated files
if(FLEX_BISON_WARN_SUPPRESS_FLAGS)
add_compile_options(${FLEX_BISON_WARN_SUPPRESS_FLAGS})
endif()
#Create the library
add_library(libtatumparse STATIC
${LIB_HEADERS}
${LIB_SOURCES}
${FLEX_TatumLexer_OUTPUTS}
${BISON_TatumParser_OUTPUT_SOURCE})
#Need binary dir for flex/bison generated headers
target_include_directories(libtatumparse PRIVATE ${CMAKE_CURRENT_BINARY_DIR})
target_include_directories(libtatumparse PUBLIC ${LIB_INCLUDE_DIRS})
set_target_properties(libtatumparse PROPERTIES PREFIX "") #Avoid extra 'lib' prefix

View File

@ -0,0 +1,134 @@
#ifndef TATUMPARSE_H
#define TATUMPARSE_H
/*
* libtatumparse - Kevin E. Murray 2016
*
* Released under MIT License see LICENSE.txt for details.
*
* OVERVIEW
* --------------------------
 * This library provides support for parsing tatum (.tatum) files, which describe a
 * timing graph along with its timing constraints, delay model and (optionally)
 * pre-computed analysis results.
*
* USAGE
* --------------------------
* Define a callback derived from the tatumparse::Callback interface, and pass it
* to one of the tatumparse::tatum_parse_*() functions.
*
 * The parser will then call the various callback methods as it encounters the
 * appropriate parts of the file.
*
* See main.cpp and tatum_pretty_print.hpp for example usage.
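 *
 * As a rough sketch (the 'MyCallback' class is illustrative; it must override every pure
 * virtual method of tatumparse::Callback, and its definition is omitted here for brevity):
 *
 *      MyCallback cb;
 *      tatumparse::tatum_parse_filename("results.tatum", cb);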
*
*/
#include <vector>
#include <string>
#include <memory>
#include <limits>
#include <functional>
namespace tatumparse {
/*
* Data structure Forward declarations
*/
enum class NodeType {
SOURCE,
SINK,
IPIN,
OPIN,
CPIN
};
enum class EdgeType {
PRIMITIVE_COMBINATIONAL,
PRIMITIVE_CLOCK_LAUNCH,
PRIMITIVE_CLOCK_CAPTURE,
INTERCONNECT
};
enum class TagType {
SETUP_DATA_ARRIVAL,
SETUP_DATA_REQUIRED,
SETUP_LAUNCH_CLOCK,
SETUP_CAPTURE_CLOCK,
SETUP_SLACK,
HOLD_DATA_ARRIVAL,
HOLD_DATA_REQUIRED,
HOLD_LAUNCH_CLOCK,
HOLD_CAPTURE_CLOCK,
HOLD_SLACK
};
/*
* Callback object
*/
class Callback {
public:
virtual ~Callback() {};
//Start of parsing
virtual void start_parse() = 0;
//Sets current filename
virtual void filename(std::string fname) = 0;
//Sets current line number
virtual void lineno(int line_num) = 0;
virtual void start_graph() = 0;
virtual void add_node(int node_id, NodeType type, std::vector<int> in_edge_ids, std::vector<int> out_edge_ids) = 0;
virtual void add_edge(int edge_id, EdgeType type, int src_node_id, int sink_node_id, bool disabled=false) = 0;
virtual void finish_graph() = 0;
virtual void start_constraints() = 0;
virtual void add_clock_domain(int domain_id, std::string name) = 0;
virtual void add_clock_source(int node_id, int domain_id) = 0;
virtual void add_constant_generator(int node_id) = 0;
virtual void add_max_input_constraint(int node_id, int domain_id, float constraint) = 0;
virtual void add_min_input_constraint(int node_id, int domain_id, float constraint) = 0;
virtual void add_max_output_constraint(int node_id, int domain_id, float constraint) = 0;
virtual void add_min_output_constraint(int node_id, int domain_id, float constraint) = 0;
virtual void add_setup_constraint(int src_domain_id, int sink_domain_id, int capture_domain, float constraint) = 0;
virtual void add_hold_constraint(int src_domain_id, int sink_domain_id, int capture_domain, float constraint) = 0;
virtual void add_setup_uncertainty(int src_domain_id, int sink_domain_id, float uncertainty) = 0;
virtual void add_hold_uncertainty(int src_domain_id, int sink_domain_id, float uncertainty) = 0;
virtual void add_early_source_latency(int domain_id, float latency) = 0;
virtual void add_late_source_latency(int domain_id, float latency) = 0;
virtual void finish_constraints() = 0;
virtual void start_delay_model() = 0;
virtual void add_edge_delay(int edge_id, float min_delay, float max_delay) = 0;
virtual void add_edge_setup_hold_time(int edge_id, float min_delay, float max_delay) = 0;
virtual void finish_delay_model() = 0;
virtual void start_results() = 0;
virtual void add_node_tag(TagType type, int node_id, int launch_domain_id, int capture_domain_id, float time) = 0;
virtual void add_edge_slack(TagType type, int edge_id, int launch_domain_id, int capture_domain_id, float slack) = 0;
virtual void add_node_slack(TagType type, int node_id, int launch_domain_id, int capture_domain_id, float slack) = 0;
virtual void finish_results() = 0;
//End of parsing
virtual void finish_parse() = 0;
//Error during parsing
virtual void parse_error(const int curr_lineno, const std::string& near_text, const std::string& msg) = 0;
};
/*
 * External functions for loading a tatum file
*/
void tatum_parse_filename(std::string filename, Callback& callback);
void tatum_parse_filename(const char* filename, Callback& callback);
//Loads from 'tatum'. 'filename' only used to pass a filename to callback and can be left unspecified
void tatum_parse_file(FILE* tatum, Callback& callback, const char* filename="");
/*
* Enumerations
*/
} //namespace
#endif

View File

@ -0,0 +1,58 @@
#include <cstdio>
#include "tatumparse.hpp"
#include "tatumparse_common.hpp"
#include "tatumparse_lexer.hpp"
#include "tatumparse_error.hpp"
namespace tatumparse {
/*
 * Given a filename, parses the file as a tatum file,
 * invoking the supplied callback as each section of
 * the file is encountered.
*/
void tatum_parse_filename(std::string filename, Callback& callback) {
tatum_parse_filename(filename.c_str(), callback);
}
void tatum_parse_filename(const char* filename, Callback& callback) {
FILE* infile = std::fopen(filename, "r");
if(infile != NULL) {
//Parse the file
tatum_parse_file(infile, callback, filename);
std::fclose(infile);
} else {
tatum_error_wrap(callback, 0, "", "Could not open file '%s'.\n", filename);
}
}
void tatum_parse_file(FILE* tatum_file, Callback& callback, const char* filename) {
//Initialize the lexer
Lexer lexer(tatum_file, callback);
//Setup the parser + lexer
Parser parser(lexer, callback);
//Just before parsing starts
callback.start_parse();
//Tell the caller the file name
callback.filename(filename);
//Do the actual parse
int error = parser.parse();
if(error) {
tatum_error_wrap(callback, 0, "", "File failed to parse.\n");
}
//Finished parsing
callback.finish_parse();
}
}

View File

@ -0,0 +1,152 @@
%{
/*
* Include Files
*/
#include "tatumparse.hpp"
#include "tatumparse/tatumparse_common.hpp"
#include "tatumparse/tatumparse_error.hpp"
#include "tatumparse/tatumparse_lexer.hpp"
%}
/*
* Options
*/
/* track line numbers*/
%option yylineno
/* No lexing across files */
%option noyywrap
/* unistd.h doesn't exist on windows */
%option nounistd
/* Avoid unused yyunput function warning */
%option nounput
/* isatty() doesn't exist on windows */
%option never-interactive
/* no default rule to echo unrecognized tokens to output */
%option nodefault
%option reentrant
/*
* Use a prefix to avoid name clashes with other
* flex lexers
*/
%option prefix="tatumparse_"
/* Common character classes */
ALPHA_SYMBOL [-a-zA-Z_~|:*/\[\]\.\{\}^+$]
DIGIT [0-9]
ALPHA_NUM_SYMBOL ({ALPHA_SYMBOL}|{DIGIT})
BACK_SLASH [\\]
WS [ \t]
ENDL (\n|\n\r|\r\n)
/*
* Symbol Definitions
*/
%%
#.*{ENDL} { /* ignore comments */ }
{BACK_SLASH}{WS}*{ENDL} { /* line continuation don't send EOL to parser */ }
^{WS}*{ENDL} { /* Ignore blank lines. */ }
{ENDL} {
return tatumparse::Parser::make_EOL();
}
{WS}+ { /*skip white space*/ }
timing_graph: { return tatumparse::Parser::make_TIMING_GRAPH(); }
node: { return tatumparse::Parser::make_NODE(); }
type: { return tatumparse::Parser::make_TYPE(); }
SOURCE { return tatumparse::Parser::make_SOURCE(); }
SINK { return tatumparse::Parser::make_SINK(); }
IPIN { return tatumparse::Parser::make_IPIN(); }
OPIN { return tatumparse::Parser::make_OPIN(); }
CPIN { return tatumparse::Parser::make_CPIN(); }
in_edges: { return tatumparse::Parser::make_IN_EDGES(); }
out_edges: { return tatumparse::Parser::make_OUT_EDGES(); }
edge: { return tatumparse::Parser::make_EDGE(); }
src_node: { return tatumparse::Parser::make_SRC_NODE(); }
sink_node: { return tatumparse::Parser::make_SINK_NODE(); }
disabled: { return tatumparse::Parser::make_DISABLED(); }
PRIMITIVE_COMBINATIONAL { return tatumparse::Parser::make_PRIMITIVE_COMBINATIONAL(); }
PRIMITIVE_CLOCK_LAUNCH { return tatumparse::Parser::make_PRIMITIVE_CLOCK_LAUNCH(); }
PRIMITIVE_CLOCK_CAPTURE { return tatumparse::Parser::make_PRIMITIVE_CLOCK_CAPTURE(); }
INTERCONNECT { return tatumparse::Parser::make_INTERCONNECT(); }
timing_constraints: { return tatumparse::Parser::make_TIMING_CONSTRAINTS(); }
CLOCK { return tatumparse::Parser::make_CLOCK(); }
CLOCK_SOURCE { return tatumparse::Parser::make_CLOCK_SOURCE(); }
CONSTANT_GENERATOR { return tatumparse::Parser::make_CONSTANT_GENERATOR(); }
MAX_INPUT_CONSTRAINT { return tatumparse::Parser::make_MAX_INPUT_CONSTRAINT(); }
MIN_INPUT_CONSTRAINT { return tatumparse::Parser::make_MIN_INPUT_CONSTRAINT(); }
MAX_OUTPUT_CONSTRAINT { return tatumparse::Parser::make_MAX_OUTPUT_CONSTRAINT(); }
MIN_OUTPUT_CONSTRAINT { return tatumparse::Parser::make_MIN_OUTPUT_CONSTRAINT(); }
SETUP_CONSTRAINT { return tatumparse::Parser::make_SETUP_CONSTRAINT(); }
HOLD_CONSTRAINT { return tatumparse::Parser::make_HOLD_CONSTRAINT(); }
SETUP_UNCERTAINTY { return tatumparse::Parser::make_SETUP_UNCERTAINTY(); }
HOLD_UNCERTAINTY { return tatumparse::Parser::make_HOLD_UNCERTAINTY(); }
EARLY_SOURCE_LATENCY { return tatumparse::Parser::make_EARLY_SOURCE_LATENCY(); }
LATE_SOURCE_LATENCY { return tatumparse::Parser::make_LATE_SOURCE_LATENCY(); }
domain: { return tatumparse::Parser::make_DOMAIN(); }
name: { return tatumparse::Parser::make_NAME(); }
constraint: { return tatumparse::Parser::make_CONSTRAINT(); }
uncertainty: { return tatumparse::Parser::make_UNCERTAINTY(); }
latency: { return tatumparse::Parser::make_LATENCY(); }
launch_domain: { return tatumparse::Parser::make_LAUNCH_DOMAIN(); }
capture_domain: { return tatumparse::Parser::make_CAPTURE_DOMAIN(); }
capture_node: { return tatumparse::Parser::make_CAPTURE_NODE(); }
delay_model: { return tatumparse::Parser::make_DELAY_MODEL(); }
min_delay: { return tatumparse::Parser::make_MIN_DELAY(); }
max_delay: { return tatumparse::Parser::make_MAX_DELAY(); }
setup_time: { return tatumparse::Parser::make_SETUP_TIME(); }
hold_time: { return tatumparse::Parser::make_HOLD_TIME(); }
analysis_result: { return tatumparse::Parser::make_ANALYSIS_RESULTS(); }
SETUP_DATA { return tatumparse::Parser::make_SETUP_DATA(); }
SETUP_DATA_ARRIVAL { return tatumparse::Parser::make_SETUP_DATA_ARRIVAL(); }
SETUP_DATA_REQUIRED { return tatumparse::Parser::make_SETUP_DATA_REQUIRED(); }
SETUP_LAUNCH_CLOCK { return tatumparse::Parser::make_SETUP_LAUNCH_CLOCK(); }
SETUP_CAPTURE_CLOCK { return tatumparse::Parser::make_SETUP_CAPTURE_CLOCK(); }
SETUP_SLACK { return tatumparse::Parser::make_SETUP_SLACK(); }
HOLD_DATA { return tatumparse::Parser::make_HOLD_DATA(); }
HOLD_DATA_ARRIVAL { return tatumparse::Parser::make_HOLD_DATA_ARRIVAL(); }
HOLD_DATA_REQUIRED { return tatumparse::Parser::make_HOLD_DATA_REQUIRED(); }
HOLD_LAUNCH_CLOCK { return tatumparse::Parser::make_HOLD_LAUNCH_CLOCK(); }
HOLD_CAPTURE_CLOCK { return tatumparse::Parser::make_HOLD_CAPTURE_CLOCK(); }
HOLD_SLACK { return tatumparse::Parser::make_HOLD_SLACK(); }
time: { return tatumparse::Parser::make_TIME(); }
slack: { return tatumparse::Parser::make_SLACK(); }
true { return tatumparse::Parser::make_TRUE(); }
false { return tatumparse::Parser::make_FALSE(); }
[+-]?{DIGIT}+ { return tatumparse::Parser::make_INT(atoi(tatumparse_get_text(yyscanner))); }
(nan)|([-+]?(inf|((({DIGIT}*\.?{DIGIT}+)|({DIGIT}+\.))([eE][-+]?{DIGIT}+)?))) {
return tatumparse::Parser::make_FLOAT(atof(tatumparse_get_text(yyscanner)));
}
\"{ALPHA_NUM_SYMBOL}+\" {
//We trim off the surrounding quotes
const char* quoted_text = tatumparse_get_text(yyscanner);
size_t len = strlen(quoted_text);
return tatumparse::Parser::make_STRING(std::string(quoted_text + 1, len-2));
}
<<EOF>> { /* If the file has no blank line at the end there will
not be the expected EOL following the last command.
So the first time through, return EOL, and subsequently
return 0 (which indicates end of file). This ensures
there will always be an EOL provided to the parser.
However it may also generate a stray EOL if the last
line IS blank - so the parser must handle those correctly. */
static bool once; return (once = !once) ? tatumparse::Parser::make_EOL() : tatumparse::Parser::make_EOF();
}
. { tatumparse::tatum_error_wrap(callback, tatumparse_get_lineno(yyscanner), tatumparse_get_text(yyscanner), "unrecognized character"); }
%%

View File

@ -0,0 +1,309 @@
/* C++ parsers require Bison 3 */
%require "3.0"
%language "C++"
/* Write-out tokens header file */
%defines
/* Use Bison's 'variant' to store values.
* This allows us to use non-POD types (e.g.
* with constructors/destructors), which is
* not possible with the default mode which
* uses unions.
*/
%define api.value.type variant
/*
* Use the 'complete' symbol type (i.e. variant)
* in the lexer
*/
%define api.token.constructor
/*
* Add a prefix to the make_* functions used to
* create the symbols
*/
%define api.token.prefix {TOKEN_}
/*
* Use a re-entrant (no global vars) parser
*/
/*%define api.pure full*/
/* Wrap everything in our namespace */
%define api.namespace {tatumparse}
/* Name the parser class */
%define parser_class_name {Parser}
/* Match the flex prefix */
%define api.prefix {tatumparse_}
/* Extra checks for correct usage */
%define parse.assert
/* Enable debugging info */
%define parse.trace
/* Better error reporting */
%define parse.error verbose
/*
* Fixes inaccuracy in verbose error reporting.
* May be slow for some grammars.
*/
/*%define parse.lac full*/
/* Track locations */
/*%locations*/
/* Generate a table of token names */
%token-table
%lex-param {Lexer& lexer}
%parse-param {Lexer& lexer}
%parse-param {Callback& callback}
%code requires {
#include <memory>
#include "tatumparse.hpp"
#include "tatumparse/tatumparse_lexer_fwd.hpp"
}
%code top {
#include "tatumparse/tatumparse_lexer.hpp"
//Bison calls tatumparse_lex() to get the next token.
//We use the Lexer class as the interface to the lexer, so we
//re-define the function to tell Bison how to get the next token.
static tatumparse::Parser::symbol_type tatumparse_lex(tatumparse::Lexer& lexer) {
return lexer.next_token();
}
}
%{
#include <stdio.h>
#include <cmath>
#include "assert.h"
#include "tatumparse.hpp"
#include "tatumparse/tatumparse_common.hpp"
#include "tatumparse/tatumparse_error.hpp"
using namespace tatumparse;
%}
/* Declare constant */
%token TIMING_GRAPH "timing_graph:"
%token NODE "node:"
%token TYPE "type:"
%token SOURCE "SOURCE"
%token SINK "SINK"
%token IPIN "IPIN"
%token OPIN "OPIN"
%token CPIN "CPIN"
%token IN_EDGES "in_edges:"
%token OUT_EDGES "out_edges:"
%token EDGE "edge:"
%token SRC_NODE "src_node:"
%token SINK_NODE "sink_node:"
%token DISABLED "disabled:"
%token PRIMITIVE_COMBINATIONAL "PRIMITIVE_COMBINATIONAL"
%token PRIMITIVE_CLOCK_LAUNCH "PRIMITIVE_CLOCK_LAUNCH"
%token PRIMITIVE_CLOCK_CAPTURE "PRIMITIVE_CLOCK_CAPTURE"
%token INTERCONNECT "INTERCONNECT"
%token TRUE "true"
%token FALSE "false"
%token TIMING_CONSTRAINTS "timing_constraints:"
%token CLOCK "CLOCK"
%token CLOCK_SOURCE "CLOCK_SOURCE"
%token CONSTANT_GENERATOR "CONSTANT_GENERATOR"
%token MAX_INPUT_CONSTRAINT "MAX_INPUT_CONSTRAINT"
%token MIN_INPUT_CONSTRAINT "MIN_INPUT_CONSTRAINT"
%token MAX_OUTPUT_CONSTRAINT "MAX_OUTPUT_CONSTRAINT"
%token MIN_OUTPUT_CONSTRAINT "MIN_OUTPUT_CONSTRAINT"
%token SETUP_CONSTRAINT "SETUP_CONSTRAINT"
%token HOLD_CONSTRAINT "HOLD_CONSTRAINT"
%token SETUP_UNCERTAINTY "SETUP_UNCERTAINTY"
%token HOLD_UNCERTAINTY "HOLD_UNCERTAINTY"
%token EARLY_SOURCE_LATENCY "EARLY_SOURCE_LATENCY"
%token LATE_SOURCE_LATENCY "LATE_SOURCE_LATENCY"
%token DOMAIN "domain:"
%token NAME "name:"
%token CONSTRAINT "constraint:"
%token UNCERTAINTY "uncertainty:"
%token LATENCY "latency:"
%token LAUNCH_DOMAIN "launch_domain:"
%token CAPTURE_DOMAIN "capture_domain:"
%token CAPTURE_NODE "capture_node:"
%token DELAY_MODEL "delay_model:"
%token MIN_DELAY "min_delay:"
%token MAX_DELAY "max_delay:"
%token SETUP_TIME "setup_time:"
%token HOLD_TIME "hold_time:"
%token ANALYSIS_RESULTS "analysis_results:"
%token SETUP_DATA "SETUP_DATA"
%token SETUP_DATA_ARRIVAL "SETUP_DATA_ARRIVAL"
%token SETUP_DATA_REQUIRED "SETUP_DATA_REQUIRED"
%token SETUP_LAUNCH_CLOCK "SETUP_LAUNCH_CLOCK"
%token SETUP_CAPTURE_CLOCK "SETUP_CAPTURE_CLOCK"
%token SETUP_SLACK "SETUP_SLACK"
%token HOLD_DATA "HOLD_DATA"
%token HOLD_DATA_ARRIVAL "HOLD_DATA_ARRIVAL"
%token HOLD_DATA_REQUIRED "HOLD_DATA_REQUIRED"
%token HOLD_LAUNCH_CLOCK "HOLD_LAUNCH_CLOCK"
%token HOLD_CAPTURE_CLOCK "HOLD_CAPTURE_CLOCK"
%token HOLD_SLACK "HOLD_SLACK"
%token TIME "time:"
%token SLACK "slack:"
%token EOL "end-of-line"
%token EOF 0 "end-of-file"
/* declare variable tokens */
%token <std::string> STRING
%token <int> INT
%token <float> FLOAT
/* declare types */
%type <int> NodeId
%type <int> SrcNodeId
%type <int> SinkNodeId
%type <int> EdgeId
%type <tatumparse::NodeType> NodeType
%type <tatumparse::EdgeType> EdgeType
%type <std::vector<int>> IntList
%type <std::vector<int>> InEdges
%type <std::vector<int>> OutEdges
%type <int> DomainId
%type <int> LaunchDomainId
%type <int> CaptureDomainId
%type <int> CaptureNodeId
%type <float> Constraint
%type <float> Uncertainty
%type <float> Latency
%type <std::string> Name
%type <float> Number
%type <float> MaxDelay
%type <float> MinDelay
%type <float> SetupTime
%type <float> HoldTime
%type <float> Time
%type <float> Slack
%type <TagType> TagType
%type <bool> Disabled
%type <bool> Bool
/* Top level rule */
%start tatum_data
%%
tatum_data: /*empty*/ { }
| tatum_data Graph { callback.finish_graph(); }
| tatum_data Constraints { callback.finish_constraints(); }
| tatum_data DelayModel { callback.finish_delay_model(); }
| tatum_data Results { callback.finish_results(); }
| tatum_data EOL { /*eat stray EOL */ }
Graph: TIMING_GRAPH EOL { callback.start_graph(); }
| Graph NodeId NodeType InEdges OutEdges EOL { callback.add_node($2, $3, $4, $5); }
| Graph EdgeId EdgeType SrcNodeId SinkNodeId Disabled EOL { callback.add_edge($2, $3, $4, $5, $6); }
Constraints: TIMING_CONSTRAINTS EOL { callback.start_constraints(); }
| Constraints TYPE CLOCK DomainId Name EOL { callback.add_clock_domain($4, $5); }
| Constraints TYPE CLOCK_SOURCE NodeId DomainId EOL { callback.add_clock_source($4, $5); }
| Constraints TYPE CONSTANT_GENERATOR NodeId EOL { callback.add_constant_generator($4); }
| Constraints TYPE MAX_INPUT_CONSTRAINT NodeId DomainId Constraint EOL { callback.add_max_input_constraint($4, $5, $6); }
| Constraints TYPE MIN_INPUT_CONSTRAINT NodeId DomainId Constraint EOL { callback.add_min_input_constraint($4, $5, $6); }
| Constraints TYPE MAX_OUTPUT_CONSTRAINT NodeId DomainId Constraint EOL { callback.add_max_output_constraint($4, $5, $6); }
| Constraints TYPE MIN_OUTPUT_CONSTRAINT NodeId DomainId Constraint EOL { callback.add_min_output_constraint($4, $5, $6); }
| Constraints TYPE SETUP_CONSTRAINT LaunchDomainId CaptureDomainId Constraint EOL { callback.add_setup_constraint($4, $5, -1, $6); }
| Constraints TYPE HOLD_CONSTRAINT LaunchDomainId CaptureDomainId Constraint EOL { callback.add_hold_constraint($4, $5, -1, $6); }
| Constraints TYPE SETUP_CONSTRAINT LaunchDomainId CaptureDomainId CaptureNodeId Constraint EOL { callback.add_setup_constraint($4, $5, $6, $7); }
| Constraints TYPE HOLD_CONSTRAINT LaunchDomainId CaptureDomainId CaptureNodeId Constraint EOL { callback.add_hold_constraint($4, $5, $6, $7); }
| Constraints TYPE SETUP_UNCERTAINTY LaunchDomainId CaptureDomainId Uncertainty EOL { callback.add_setup_uncertainty($4, $5, $6); }
| Constraints TYPE HOLD_UNCERTAINTY LaunchDomainId CaptureDomainId Uncertainty EOL { callback.add_hold_uncertainty($4, $5, $6); }
| Constraints TYPE EARLY_SOURCE_LATENCY DomainId Latency EOL { callback.add_early_source_latency($4, $5); }
| Constraints TYPE LATE_SOURCE_LATENCY DomainId Latency EOL { callback.add_late_source_latency($4, $5); }
DelayModel: DELAY_MODEL EOL { callback.start_delay_model(); }
| DelayModel EdgeId MinDelay MaxDelay EOL { callback.add_edge_delay($2, $3, $4); }
| DelayModel EdgeId SetupTime HoldTime EOL { callback.add_edge_setup_hold_time($2, $3, $4); }
Results: ANALYSIS_RESULTS EOL { callback.start_results(); }
| Results TagType NodeId LaunchDomainId CaptureDomainId Time EOL { callback.add_node_tag($2, $3, $4, $5, $6); }
| Results TagType EdgeId LaunchDomainId CaptureDomainId Slack EOL { callback.add_edge_slack($2, $3, $4, $5, $6); }
| Results TagType NodeId LaunchDomainId CaptureDomainId Slack EOL { callback.add_node_slack($2, $3, $4, $5, $6); }
Time: TIME Number { $$ = $2; }
Slack: SLACK Number { $$ = $2; }
TagType: TYPE SETUP_DATA_ARRIVAL { $$ = TagType::SETUP_DATA_ARRIVAL; }
| TYPE SETUP_DATA_REQUIRED { $$ = TagType::SETUP_DATA_REQUIRED; }
| TYPE SETUP_LAUNCH_CLOCK { $$ = TagType::SETUP_LAUNCH_CLOCK; }
| TYPE SETUP_CAPTURE_CLOCK { $$ = TagType::SETUP_CAPTURE_CLOCK; }
| TYPE SETUP_SLACK { $$ = TagType::SETUP_SLACK; }
| TYPE HOLD_DATA_ARRIVAL { $$ = TagType::HOLD_DATA_ARRIVAL; }
| TYPE HOLD_DATA_REQUIRED { $$ = TagType::HOLD_DATA_REQUIRED; }
| TYPE HOLD_LAUNCH_CLOCK { $$ = TagType::HOLD_LAUNCH_CLOCK; }
| TYPE HOLD_CAPTURE_CLOCK { $$ = TagType::HOLD_CAPTURE_CLOCK; }
| TYPE HOLD_SLACK { $$ = TagType::HOLD_SLACK; }
MaxDelay: MAX_DELAY Number { $$ = $2; }
MinDelay: MIN_DELAY Number { $$ = $2; }
SetupTime: SETUP_TIME Number { $$ = $2; }
HoldTime: HOLD_TIME Number { $$ = $2; }
DomainId: DOMAIN INT { $$ = $2; }
LaunchDomainId: LAUNCH_DOMAIN INT { $$ = $2; }
CaptureDomainId: CAPTURE_DOMAIN INT { $$ = $2; }
CaptureNodeId: CAPTURE_NODE INT { $$ = $2; }
Constraint: CONSTRAINT Number { $$ = $2; }
Uncertainty: UNCERTAINTY Number { $$ = $2; }
Latency: LATENCY Number { $$ = $2; }
Name: NAME STRING { $$ = $2; }
NodeType: TYPE SOURCE { $$ = NodeType::SOURCE; }
| TYPE SINK { $$ = NodeType::SINK; }
| TYPE IPIN { $$ = NodeType::IPIN; }
| TYPE OPIN { $$ = NodeType::OPIN; }
| TYPE CPIN { $$ = NodeType::CPIN; }
EdgeType: TYPE PRIMITIVE_COMBINATIONAL { $$ = EdgeType::PRIMITIVE_COMBINATIONAL; }
| TYPE PRIMITIVE_CLOCK_LAUNCH { $$ = EdgeType::PRIMITIVE_CLOCK_LAUNCH; }
| TYPE PRIMITIVE_CLOCK_CAPTURE { $$ = EdgeType::PRIMITIVE_CLOCK_CAPTURE; }
| TYPE INTERCONNECT { $$ = EdgeType::INTERCONNECT; }
NodeId: NODE INT { $$ = $2;}
InEdges: IN_EDGES IntList { $$ = $2; }
OutEdges: OUT_EDGES IntList { $$ = $2; }
EdgeId: EDGE INT { $$ = $2; }
SrcNodeId: SRC_NODE INT { $$ = $2; }
SinkNodeId: SINK_NODE INT { $$ = $2; }
Disabled: /* Unspecified */ { $$ = false; }
| DISABLED Bool { $$ = $2; }
Bool: TRUE { $$ = true; }
| FALSE { $$ = false; }
IntList: /*empty*/ { $$ = std::vector<int>(); }
| IntList INT { $$ = std::move($1); $$.push_back($2); }
Number: INT { $$ = $1; }
| FLOAT { $$ = $1; }
%%
void tatumparse::Parser::error(const std::string& msg) {
tatum_error_wrap(callback, lexer.lineno(), lexer.text(), msg.c_str());
}
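To make the grammar above concrete, here is a small illustrative input fragment that the lexer rules and productions shown in this diff would accept. The node/edge ids, clock name and delay/time values are invented for the example; it is inferred from the grammar, not copied from a real tatum file (note the section keyword is "analysis_result:", matching the lexer rule above):

timing_graph:
node: 0 type: SOURCE in_edges: out_edges: 0
node: 1 type: SINK in_edges: 0 out_edges:
edge: 0 type: INTERCONNECT src_node: 0 sink_node: 1

timing_constraints:
type: CLOCK domain: 0 name: "clk"
type: CLOCK_SOURCE node: 0 domain: 0
type: SETUP_CONSTRAINT launch_domain: 0 capture_domain: 0 constraint: 1.0

delay_model:
edge: 0 min_delay: 0.5 max_delay: 1.5

analysis_result:
type: SETUP_DATA_ARRIVAL node: 1 launch_domain: 0 capture_domain: 0 time: 1.5

Blank lines and '#' comments are discarded by the lexer, and each record runs to the end of its line (the EOL token).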

View File

@ -0,0 +1,24 @@
#ifndef TATUMPARSE_COMMON_HPP
#define TATUMPARSE_COMMON_HPP
#include "tatumparse.hpp"
namespace tatumparse {
struct NodeTag {
NodeTag(int domain, float arr_val, float req_val)
: domain_id(domain), arr(arr_val), req(req_val) {}
int domain_id;
float arr;
float req;
};
struct NodeResult {
int id;
std::vector<NodeTag> tags;
};
}
#endif

View File

@ -0,0 +1,69 @@
#include <cstdarg>
#include <cassert>
#include "tatumparse_error.hpp"
#include "tatumparse.hpp"
namespace tatumparse {
std::string escape_string(const std::string& near_text);
//We wrap the error callback to isolate custom error handlers from varargs
void tatum_error_wrap(Callback& callback, const int line_no, const std::string& near_text, const char* fmt, ...) {
va_list args;
va_start(args, fmt);
//We need to copy the args so we don't consume them before the real formatting pass
va_list args_copy;
va_copy(args_copy, args);
//Determine the formatted length using a copy of the args
int len = std::vsnprintf(nullptr, 0, fmt, args_copy);
va_end(args_copy); //Clean-up
//Negative if there is a problem with the format string
assert(len >= 0 && "Problem decoding format string");
size_t buf_size = len + 1; //For terminator
//Allocate a buffer
// unique_ptr will free buffer automatically
std::unique_ptr<char[]> buf(new char[buf_size]);
//Format into the buffer using the original args
len = std::vsnprintf(buf.get(), buf_size, fmt, args);
va_end(args); //Clean-up
assert(len >= 0 && "Problem decoding format string");
assert(static_cast<size_t>(len) == buf_size - 1);
//Build the string from the buffer
std::string msg(buf.get(), len);
//Escape near_text so control characters display cleanly in the error message
std::string escaped_near_text = escape_string(near_text);
//Call the error handler
callback.parse_error(line_no, escaped_near_text, msg);
}
std::string escape_string(const std::string& near_text) {
std::string escaped_text;
for(char c : near_text) {
if(c == '\n') {
escaped_text += "\\n";
} else if(c == '\r') {
escaped_text += "\\r";
} else {
escaped_text += c;
}
}
return escaped_text;
}
}
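The va_copy/double-vsnprintf idiom used above is worth spelling out: the first vsnprintf call gets a null buffer and zero size, so it only reports the length the formatted message needs, and the second call formats for real into an exactly-sized buffer. A self-contained sketch of the same idiom (the vformat() helper and its behaviour on a bad format string are our own choices, not part of this library):

#include <cstdarg>
#include <cstdio>
#include <memory>
#include <string>

//Format a printf-style message into a std::string using the same
//measure-then-format approach as tatum_error_wrap() above.
std::string vformat(const char* fmt, ...) {
    va_list args;
    va_start(args, fmt);

    //Copy the args: the measuring pass consumes its copy, leaving the
    //original list intact for the real formatting pass.
    va_list args_copy;
    va_copy(args_copy, args);

    //Pass 1: null buffer, zero size -> returns the needed length (excluding '\0')
    int len = std::vsnprintf(nullptr, 0, fmt, args_copy);
    va_end(args_copy);
    if (len < 0) { //Problem with the format string
        va_end(args);
        return "<format error>";
    }

    //Pass 2: format into an exactly-sized buffer (+1 for the terminator)
    std::unique_ptr<char[]> buf(new char[len + 1]);
    std::vsnprintf(buf.get(), len + 1, fmt, args);
    va_end(args);

    return std::string(buf.get(), len);
}

//e.g. vformat("Could not open file '%s'.", "my.tatum")
//returns the std::string "Could not open file 'my.tatum'."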

View File

@ -0,0 +1,12 @@
#ifndef TATUM_ERROR_H
#define TATUM_ERROR_H
#include <functional>
#include "tatumparse.hpp"
namespace tatumparse {
void tatum_error_wrap(Callback& callback, const int line_no, const std::string& near_text, const char* fmt, ...);
}
#endif

View File

@ -0,0 +1,30 @@
#include "tatumparse_lexer.hpp"
#include "tatumparse_lexer.gen.h" //For tatumparse_lex_*()
extern YY_DECL; //For tatumparse_lex()
namespace tatumparse {
Lexer::Lexer(FILE* file, Callback& callback)
: callback_(callback) {
tatumparse_lex_init(&state_);
tatumparse_set_in(file, state_);
}
Lexer::~Lexer() {
tatumparse_lex_destroy(state_);
}
Parser::symbol_type Lexer::next_token() {
return tatumparse_lex(state_, callback_);
}
const char* Lexer::text() const {
return tatumparse_get_text(state_);
}
int Lexer::lineno() const {
return tatumparse_get_lineno(state_);
}
}
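The Lexer class above is essentially an RAII wrapper over flex's reentrant C interface: the constructor acquires the scanner state (tatumparse_lex_init) and the destructor releases it (tatumparse_lex_destroy), so the scanner cannot leak even if parsing exits early. For illustration, the same shape applied to a plain C resource; FileHandle is an invented example class, not part of this library:

#include <cstdio>
#include <stdexcept>
#include <string>

//RAII wrapper over a C-style acquire/release pair (fopen/fclose), mirroring
//how Lexer pairs tatumparse_lex_init() with tatumparse_lex_destroy().
class FileHandle {
public:
    FileHandle(const char* path, const char* mode)
        : file_(std::fopen(path, mode)) {
        if (!file_) {
            throw std::runtime_error(std::string("Could not open ") + path);
        }
    }
    ~FileHandle() { std::fclose(file_); }

    //Non-copyable: two owners would fclose() the same handle twice
    FileHandle(const FileHandle&) = delete;
    FileHandle& operator=(const FileHandle&) = delete;

    FILE* get() const { return file_; }

private:
    FILE* file_;
};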

View File

@ -0,0 +1,34 @@
#ifndef TATUM_LEXER_HPP
#define TATUM_LEXER_HPP
#include "tatumparse.hpp" //For tatumparse::Callback
#include "tatumparse_parser.hpp" //For Parser::symbol_type
namespace tatumparse {
typedef void* yyscan_t;
class Lexer {
public:
Lexer(FILE* file, Callback& callback);
~Lexer();
Parser::symbol_type next_token();
const char* text() const;
int lineno() const;
private:
yyscan_t state_;
Callback& callback_;
};
} //namespace
/*
* The YY_DECL is used by flex to specify the signature of the main
* lexer function.
*
* We re-define it to something reasonable
*/
#undef YY_DECL
#define YY_DECL tatumparse::Parser::symbol_type tatumparse_lex(yyscan_t yyscanner, tatumparse::Callback& callback)
#endif

View File

@ -0,0 +1,9 @@
#ifndef TATUM_LEXER_FWD_H
#define TATUM_LEXER_FWD_H
//Forward declaration used by parser definition
namespace tatumparse {
class Lexer;
}
#endif

View File

@ -0,0 +1,14 @@
#ifndef TATUM_PARSER_HPP
#define TATUM_PARSER_HPP
#include "tatumparse_lexer_fwd.hpp" //Lexer class required for Parser param
//Required by the generated parser header
#ifndef YY_NULLPTR
#define YY_NULLPTR nullptr
#endif
#include "tatumparse_common.hpp"
#include "tatumparse_parser.gen.hpp" //For the generated Parser class
#endif

View File

@ -0,0 +1,105 @@
#!/usr/bin/env python
import sys
import matplotlib.pyplot as plt
import numpy as np
import re
from collections import OrderedDict
import argparse
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("log_file", help="Log file to process")
parser.add_argument("--metric", choices=["edge_src_node", "traversed_node", "traversed_edge", "node_tags"])
args = parser.parse_args()
return args
def main():
args = parse_args()
data = []
arrival_level_indexes = OrderedDict()
required_level_indexes = OrderedDict()
data_regex = None
if(args.metric == "edge_src_node"):
data_regex = re.compile(r"Edge\((?P<edge_id>\d+)\) src_node")
elif (args.metric == "traversed_node"):
data_regex = re.compile(r"(Arrival|Required) Traverse Node\((?P<id>\d+)\)")
elif (args.metric == "traversed_edge"):
data_regex = re.compile(r"(Arrival|Required) Traverse Edge\((?P<id>\d+)\)")
elif (args.metric == "node_tags"):
data_regex = re.compile(r"Tags Node\((?P<id>\d+)\)")
else:
assert False
start_traversals_regex = re.compile(r"Start Traversals")
end_traversals_regex = re.compile(r"End Traversals")
arrival_level_regex = re.compile(r"Arrival Level\((?P<level_id>\d+)\)")
required_level_regex = re.compile(r"Required Level\((?P<level_id>\d+)\)")
with open(args.log_file) as f:
i = 0
in_traversals = False
for line in f:
match = start_traversals_regex.match(line)
if match:
in_traversals = True
match = end_traversals_regex.match(line)
if match:
in_traversals = False
break
if in_traversals:
match = data_regex.match(line)
if match:
data.append(int(match.group("id")))
i += 1
match = arrival_level_regex.match(line)
if match:
arrival_level_indexes[i] = int(match.group("level_id"))
match = required_level_regex.match(line)
if match:
required_level_indexes[i] = int(match.group("level_id"))
#if i > 1000:
#break
max_value = max(data)
num_values = len(data)
print "Max: ", max_value
print "num values: ", num_values
y = range(num_values)
x = data
for access_number, level_id in arrival_level_indexes.iteritems():
print "Arrival Level", level_id, " at access ", access_number
plt.axhline(access_number, color="g", alpha=0.25)
for access_number, level_id in required_level_indexes.iteritems():
print "Required Level", level_id, " at access ", access_number
plt.axhline(access_number, color="r", alpha=0.25)
plt.scatter(x, y)
plt.xlabel("Array Index " + args.metric)
plt.ylabel("Time (access #)")
plt.xlim(0, max_value)
plt.ylim(0, num_values)
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()

View File

@ -0,0 +1,139 @@
#!/usr/bin/env python
import sys, argparse
import csv
import os
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("csv_file", default=None, help="CSV file with level runtimes")
parser.add_argument("--scale_size", default=False, action="store_true", help="Scale point size by serial level runtime")
parser.add_argument("--average", default=False, action="store_true", help="Draw average lines")
parser.add_argument("-f", default=None, help="Output filename")
args = parser.parse_args()
return args
def main():
args = parse_args()
data = {}
with open(args.csv_file) as f:
csv_reader = csv.DictReader(f)
for field in csv_reader.fieldnames:
data[field] = []
for row in csv_reader:
for field in csv_reader.fieldnames:
data[field].append(float(row[field]))
for series_name, data_values in data.iteritems():
print "\tSeries: ", series_name
print "\t# Values: ", len(data_values)
#Calculate derived series
derived_series = {}
speedup_fwd = {}
speedup_bck = {}
size_factor = 0
if args.scale_size:
size_factor = 2000
size_min = 10
serial_total = sum(data['serial_fwd'][:] + data['serial_bck'][:])
for i in xrange(len(data['serial_fwd'])):
width = data['Width'][i]
serial_fwd = data['serial_fwd'][i]
serial_bck = data['serial_bck'][i]
parrallel_fwd = data['parallel_fwd'][i]
parrallel_bck = data['parallel_bck'][i]
if parrallel_fwd != 0.0:
speedup = serial_fwd / parrallel_fwd
serial_frac = serial_fwd / serial_total
val = (speedup, serial_frac)
try:
speedup_fwd[width].append(val)
except KeyError:
speedup_fwd[width] = [val]
if parrallel_bck != 0.0:
speedup = serial_bck / parrallel_bck
serial_frac = serial_bck / serial_total
val = (speedup, serial_frac)
try:
speedup_bck[width].append(val)
except KeyError:
speedup_bck[width] = [val]
fwd_x = []
fwd_y = []
fwd_s = []
for width, values in speedup_fwd.iteritems():
for speedup, serial_frac in values:
fwd_x.append(width)
fwd_y.append(speedup)
fwd_s.append(size_factor*serial_frac + size_min)
bck_x = []
bck_y = []
bck_s = []
for width, values in speedup_bck.iteritems():
for speedup, serial_frac in values:
bck_x.append(width)
bck_y.append(speedup)
bck_s.append(size_factor*serial_frac + size_min)
#Averages
fwd_x_avg = []
fwd_y_avg = []
for width, values in sorted(speedup_fwd.iteritems()):
speedups = [x[0] for x in values]
avg = sum(speedups) / len(speedups)
#print "Width, avg", width, values, speedups
fwd_x_avg.append(width)
fwd_y_avg.append(avg)
bck_x_avg = []
bck_y_avg = []
for width, values in sorted(speedup_bck.iteritems()):
speedups = [x[0] for x in values]
avg = sum(speedups) / len(speedups)
#print "Width, avg", width, values, speedups
bck_x_avg.append(width)
bck_y_avg.append(avg)
plt.scatter(fwd_x, fwd_y, fwd_s, c='b', label="speedup_fwd")
plt.scatter(bck_x, bck_y, bck_s, c='g', label="speedup_bck")
if args.average:
plt.plot(fwd_x_avg, fwd_y_avg, c='b', label="Average FWD Speed-Up")
plt.plot(bck_x_avg, bck_y_avg, c='g', label="Average BCK Speed-Up")
plt.xscale("log")
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
ymin = 0
xmin = 1
plt.ylim(ymin,ymax)
plt.xlim(xmin,xmax)
plt.title(os.path.splitext(os.path.basename(args.csv_file))[0])
plt.xlabel("Level Width")
plt.ylabel("Parallel Speed-Up")
plt.legend(loc='upper left')
if args.f:
plt.savefig(args.f, dpi=300)
else:
plt.show()
def runningMean(x, N):
return np.convolve(x, np.ones((N,))/N, mode='same')
if __name__ == "__main__":
main()

View File

@ -0,0 +1,147 @@
#!/usr/bin/env python
import sys, argparse
import csv
import matplotlib.pyplot as plt
import numpy as np
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("csv_files", nargs=2, default=None, help="CSV files with level runtimes")
parser.add_argument("-f", default=None, help="Output filename")
parser.add_argument("--min_fwd_width", type=int, default=None, help="Minimum forward width for parllel")
parser.add_argument("--min_bck_width", type=int, default=None, help="Minimum backward width for parllel")
args = parser.parse_args()
return args
def main():
args = parse_args()
data = {}
for filename in args.csv_files:
with open(filename) as f:
data[filename] = {}
csv_reader = csv.DictReader(f)
for field in csv_reader.fieldnames:
data[filename][field] = []
for row in csv_reader:
for field in csv_reader.fieldnames:
data[filename][field].append(float(row[field]))
for filename, series in data.iteritems():
print "File: ", filename
#for series_name, data_values in series.iteritems():
#print "\tSeries: ", series_name
#print "\tValuse: ", data_values
#Calculate derived series
derived_series = {}
traversals = ['Fwd', 'Bck']
N_AVG = 100
for traversal in ['Fwd', 'Bck']:
#Per-level speed-up
first_array = data[args.csv_files[0]]["%s_Time" % traversal]
second_array = data[args.csv_files[1]]["%s_Time" % traversal]
derived_series["Level Speed-up %s" % traversal] = []
for i in xrange(len(first_array)):
if(second_array[i] == 0.0):
derived_series["Level Speed-up %s" % traversal].append(None)
else:
derived_series["Level Speed-up %s" % traversal].append(first_array[i] / second_array[i])
#Averaged level speed-up
derived_series["Avg Level Speed-up %s" % traversal] = []
for i in xrange(len(first_array)):
first_avg = sum(first_array[max(0,i-N_AVG):i]) / N_AVG
second_avg = sum(second_array[max(0,i-N_AVG):i]) / N_AVG
if(second_avg == 0.0):
derived_series["Avg Level Speed-up %s" % traversal].append(None)
else:
derived_series["Avg Level Speed-up %s" % traversal].append(first_avg / second_avg)
#Cumulative time
for filename in args.csv_files:
derived_series["Cumm Time %s %s" % (filename.split('_')[0], traversal)] = []
val_sum = 0.
for value in data[filename]["%s_Time" % traversal]:
val_sum += value
derived_series["Cumm Time %s %s" % (filename.split('_')[0], traversal)].append(val_sum)
#Cumulative Speed-up
derived_series["Cumm Speed-Up %s" % traversal] = []
first_cumm_array = derived_series["Cumm Time %s %s" % (args.csv_files[0].split('_')[0], traversal)]
second_cumm_array = derived_series["Cumm Time %s %s" % (args.csv_files[1].split('_')[0], traversal)]
for i in xrange(len(first_cumm_array)):
if(second_array[i] == 0.0):
derived_series["Cumm Speed-Up %s" % traversal].append(None)
else:
derived_series["Cumm Speed-Up %s" % traversal].append(first_cumm_array[i] / second_cumm_array[i])
#Plot results
fig, ax = plt.subplots(4, sharex=True)
#Cumulative Times
for series_name in derived_series.keys():
if series_name.startswith("Cumm Time"):
ax[0].plot(data[args.csv_files[0]]["Level"], derived_series[series_name], label=series_name[len("Cumm Time"):])
ax[0].legend(loc='best')
ax[0].set_ylabel("Cummulative\nTime (sec)")
ax[0].set_title("Per Level Performance Characteristics")
ax[0].set_ylim(bottom=0)
#Per-level Speed-up
for traversal in traversals:
series_name = "Avg Level Speed-up %s" % traversal
ax[1].plot(data[args.csv_files[0]]["Level"], derived_series[series_name], label=traversal)
ax[1].legend(loc='best')
#ax[1].set_yscale('log')
ax[1].set_ylabel("Level Speed-Up\n(N_AVG %d)" % N_AVG)
ax[1].set_ylim(bottom=0)
for traversal in traversals:
series_name = "Cumm Speed-Up %s" % traversal
ax[2].plot(data[args.csv_files[0]]["Level"], derived_series[series_name], label=traversal)
ax[2].legend(loc='best')
ax[2].set_ylabel("Cummulative\nSpeed-Up")
ax[2].set_ylim(bottom=0)
ax[3].plot(data[args.csv_files[0]]["Level"], data[args.csv_files[0]]["Width"], label="width")
if args.min_fwd_width:
ax[3].plot(data[args.csv_files[0]]["Level"], [args.min_fwd_width for x in xrange(len(data[args.csv_files[0]]["Level"]))], label="Min // Fwd")
if args.min_bck_width:
ax[3].plot(data[args.csv_files[0]]["Level"], [args.min_bck_width for x in xrange(len(data[args.csv_files[0]]["Level"]))], label="Min // Bck")
ax[3].set_yscale('log')
ax[3].set_xlabel("Level")
ax[3].set_ylabel("Level Width")
#ax[3].set_xlim(right=len(data[args.csv_files[0]]["Level"]))
ax[3].legend(loc='best')
#ax[0].set_xscale('log')
#ax[1].set_xscale('log')
#ax[2].set_xscale('log')
#ax[3].set_xscale('log')
plt.tight_layout()
if args.f:
plt.savefig(args.f, dpi=300)
else:
plt.show()
if __name__ == "__main__":
main()

Some files were not shown because too many files have changed in this diff