diff --git a/libs/EXTERNAL/libtatum/.gitignore b/libs/EXTERNAL/libtatum/.gitignore new file mode 100644 index 000000000..7a80f074c --- /dev/null +++ b/libs/EXTERNAL/libtatum/.gitignore @@ -0,0 +1,49 @@ +# +# Project Related Files +# + +#Executables + +#Generated files +build*/ +core +massif.out* +*.log + +#YCM +.ycm_extra_conf.py + +#gprof +gmon.out + +#valgrind +callgrind* + +#ctags +./tags + +# +# C++ related files +# + +# Compiled Object files +*.slo +*.lo +*.o + +# Compiled Dynamic libraries +*.so +*.dylib + +# Compiled Static libraries +*.lai +*.la +*.a + +# Dependency files +*.d + +# +# Python temp files +# +*.pyc diff --git a/libs/EXTERNAL/libtatum/.travis.yml b/libs/EXTERNAL/libtatum/.travis.yml new file mode 100644 index 000000000..347c69934 --- /dev/null +++ b/libs/EXTERNAL/libtatum/.travis.yml @@ -0,0 +1,46 @@ +language: cpp +dist: trusty #Ubuntu 14.04 by default +sudo: false #Use container based infrastructure + +matrix: + include: + #Extensive testing for base compiler + - env: TESTS="basic mcnc20" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=gcc-5 CXX=g++-5" + addons: { apt: { packages: ["cmake", "g++-5", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } } + + - env: TESTS="basic mcnc20" TATUM_EXECUTION_ENGINE=serial MATRIX_EVAL="CC=gcc-5 CXX=g++-5" + addons: { apt: { packages: ["cmake", "g++-5", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } } + + - env: TESTS="basic mcnc20" TATUM_EXECUTION_ENGINE=tbb MATRIX_EVAL="CC=gcc-5 CXX=g++-5" + addons: { apt: { packages: ["cmake", "g++-5", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } } + + #Simple testing for other compilers + - env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=gcc-8 CXX=g++-8" + addons: { apt: { packages: ["cmake", "g++-8", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } } + + - env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=gcc-7 CXX=g++-7" + addons: { apt: { packages: ["cmake", "g++-7", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } } + + - env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=gcc-6 CXX=g++-6" + addons: { apt: { packages: ["cmake", "g++-6", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } } + + - env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=gcc-4.9 CXX=g++-4.9" + addons: { apt: { packages: ["cmake", "g++-4.9", "libtbb-dev"], sources: ["ubuntu-toolchain-r-test"] } } + + - env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=clang-3.5 CXX=clang++-3.5" + addons: { apt: { packages: ["cmake", "clang-3.5", "g++-4.9", "libtbb-dev"], sources: ["llvm-toolchain-trusty-3.5", "ubuntu-toolchain-r-test"] } } + + - env: TESTS="basic" TATUM_EXECUTION_ENGINE=auto MATRIX_EVAL="CC=clang-5.0 CXX=clang++-5.0" + addons: { apt: { packages: ["cmake", "clang-5.0", "g++-4.9", "libtbb-dev"], sources: ["llvm-toolchain-trusty-5.0", "ubuntu-toolchain-r-test"] } } + +before_install: + - eval "${MATRIX_EVAL}" #Set compiler versions + - echo $CC + - echo $CXX + +script: + #Build + - mkdir -p build && pushd build && cmake .. 
-DTATUM_EXECUTION_ENGINE=$TATUM_EXECUTION_ENGINE && make -j2 && popd + + #Test + - ./scripts/reg_test.py --tatum_test_exec build/tatum_test/tatum_test --tatum_nworkers 2 $TESTS diff --git a/libs/EXTERNAL/libtatum/CMakeLists.txt b/libs/EXTERNAL/libtatum/CMakeLists.txt new file mode 100644 index 000000000..f654868ed --- /dev/null +++ b/libs/EXTERNAL/libtatum/CMakeLists.txt @@ -0,0 +1,37 @@ +cmake_minimum_required(VERSION 3.9) + +project("tatum") + +set(TATUM_EXECUTION_ENGINE "auto" CACHE STRING "Specify the framework for (potential) parallel execution") +set_property(CACHE TATUM_EXECUTION_ENGINE PROPERTY STRINGS auto serial tbb) + +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/modules") + +if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + #Set the default build type if not specified + if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Release CACHE STRING + "Choose the type of build: None, Debug, Release, RelWithDebInfo, MinSizeRel" + FORCE) + endif() + message(STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}") + + + #Only set compiler flags if not a sub-project + set(WARN_FLAGS -Wall -Wextra -Wpedantic -Wcast-qual -Wcast-align -Wshadow -Wformat=2 -Wlogical-op -Wmissing-declarations -Wmissing-include-dirs -Wredundant-decls -Wswitch-default -Wundef -Wunused-variable -Wdisabled-optimization -Wnoexcept -Woverloaded-virtual -Wctor-dtor-privacy -Wnon-virtual-dtor) + + add_compile_options(${WARN_FLAGS}) + add_compile_options(-std=c++14) + + set(FLEX_BISON_WARN_SUPPRESS_FLAGS -Wno-switch-default -Wno-unused-parameter -Wno-sign-compare -Wno-missing-declarations) +endif() + +add_subdirectory(libtatum) + +if(${CMAKE_SOURCE_DIR} STREQUAL ${CMAKE_CURRENT_SOURCE_DIR}) + #Only build the parser, test executable and docs if not a sub-project + add_subdirectory(tatum_test) + add_subdirectory(libtatumparse) + add_subdirectory(tatumparse_test) + add_subdirectory(doc) +endif() diff --git a/libs/EXTERNAL/libtatum/LICENSE.txt b/libs/EXTERNAL/libtatum/LICENSE.txt new file mode 100644 index 000000000..bfcaf9a19 --- /dev/null +++ b/libs/EXTERNAL/libtatum/LICENSE.txt @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Kevin Murray + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/libs/EXTERNAL/libtatum/Makefile b/libs/EXTERNAL/libtatum/Makefile new file mode 100644 index 000000000..8adb4229f --- /dev/null +++ b/libs/EXTERNAL/libtatum/Makefile @@ -0,0 +1,68 @@ +#This is a simple wrapper hiding cmake from non-expert end users. 
+# +# It supports the targets: +# 'make' - builds everything (all libaries/executables) +# 'make clean' - removes generated build objects/libraries/executables etc. +# 'make distclean' - will clean everything including the cmake generated build files +# +# All other targets (e.g. 'make tatum_test') are passed to the cmake generated makefile +# and processed according to the CMakeLists.txt. +# +# To perform a debug build use: +# 'make BUILD_TYPE=debug' + +#Default build type +# Possible values: +# release +# debug +BUILD_TYPE = release + +#Allows users to pass parameters to cmake +# e.g. make CMAKE_PARAMS="-DVTR_ENABLE_SANITIZE=true" +override CMAKE_PARAMS := -DCMAKE_BUILD_TYPE=$(BUILD_TYPE) -G 'Unix Makefiles' ${CMAKE_PARAMS} + + +# -s : Suppresss makefile output (e.g. entering/leaving directories) +# --output-sync target : For parallel compilation ensure output for each target is synchronized (make version >= 4.0) +MAKEFLAGS := -s + +BUILD_DIR=./build +GENERATED_MAKEFILE := $(BUILD_DIR)/Makefile + +#Check for the cmake exectuable +CMAKE := $(shell command -v cmake 2> /dev/null) + +#Show test log on failures with 'make test' +export CTEST_OUTPUT_ON_FAILURE=TRUE + +#All targets in this make file are always out of date. +# This ensures that any make target requests are forwarded to +# the generated makefile +.PHONY: all distclean $(GENERATED_MAKEFILE) $(MAKECMDGOALS) + +#Build everything +all: $(GENERATED_MAKEFILE) + @+$(MAKE) -C $(BUILD_DIR) + +#Call the generated Makefile's clean, and then remove all cmake generated files +distclean: $(GENERATED_MAKEFILE) + @ echo "Cleaning build..." + @+$(MAKE) -C $(BUILD_DIR) clean + @ echo "Removing build system files.." + @ rm -rf $(BUILD_DIR) + @ rm -rf CMakeFiles CMakeCache.txt #In case 'cmake .' was run in the source directory + +#Call cmake to generate the main Makefile +$(GENERATED_MAKEFILE): +ifeq ($(CMAKE),) + $(error Required 'cmake' executable not found. On debian/ubuntu try 'sudo apt-get install cmake' to install) +endif + @ mkdir -p $(BUILD_DIR) + echo "cd $(BUILD_DIR) && $(CMAKE) $(CMAKE_PARAMS) .. " + cd $(BUILD_DIR) && $(CMAKE) $(CMAKE_PARAMS) .. + +#Forward any targets that are not named 'distclean' to the generated Makefile +ifneq ($(MAKECMDGOALS),distclean) +$(MAKECMDGOALS): $(GENERATED_MAKEFILE) + @+$(MAKE) -C $(BUILD_DIR) $(MAKECMDGOALS) +endif diff --git a/libs/EXTERNAL/libtatum/README.md b/libs/EXTERNAL/libtatum/README.md new file mode 100644 index 000000000..35026efd9 --- /dev/null +++ b/libs/EXTERNAL/libtatum/README.md @@ -0,0 +1,58 @@ +# Tatum: A Fast, Flexible Static Timing Analysis (STA) Engine for Digital Circuits + +[![Build Status](https://travis-ci.org/kmurray/tatum.svg?branch=master)](https://travis-ci.org/kmurray/tatum) + +## Overview +Tatum is a block-based Static Timing Analysis (STA) engine suitable for integration with Computer-Aided Design (CAD) tools, which analyze, implement and optimize digital circuits. +Tatum supports both setup (max-delay) and hold (min-delay) analysis, clock skew, multiple clocks and a variety of timing exceptions. + +Tatum is provided as a library (`libtatum`) which can be easily integrated into the host application. +Tatum operates on an abstract *timing graph* constructed by the host application, and can be configured to use an application defined delay calculator. + +Tatum is optimized for high performance, as required by optimizing CAD tools. 
+In particular: + * Tatum performs only a *single* set of graph traversals to calculate timing information for all clocks and analyses (setup and hold). + * Tatum's data structures are cache optimized + * Tatum supports parallel analysis using multiple CPU cores + +## How to Cite +If your work uses Tatum please cite the following as a general citation: + +K. E. Murray and V. Betz, "Tatum: Parallel Timing Analysis for Faster Design Cycles and Improved Optimization", *IEEE International Conference on Field-Programmable Technology (FPT)*, 2018 + +**BibTeX:** +``` +@inproceedings{c:tatum, + author = {Murray, Kevin E. and Betz, Vaughn}, + title = {Tatum: Parallel Timing Analysis for Faster Design Cycles and Improved Optimization}, + booktitle = {IEEE International Conference on Field-Programmable Technology (FPT)}, + year = {2018} +} +``` + +## Documentation +Coming soon. + +## Download +Coming soon. + +## Projects using Tatum + +Tatum is designed to be re-usable in a variety of applications. + +Some of the known uses are: + * The [Verilog to Routing (VTR)](https://verilogtorouting.org) project for Field-Programmable Gate Array (FPGA) Architecture and CAD research. Tatum is used as the STA engine in the VPR placement and routing tool. + * The [CGRA-ME](http://cgra-me.ece.utoronto.ca/) framework for Coarse-Grained Reconfigurable Array (CGRA) Architecture research. + +*If your project is using Tatum please let us know!* + +## History + +### Why was Tatum created? +I needed a high-performance, flexible STA engine for my research into FPGA architecture and CAD tools. +I could find no suitable open-source STA engine, so I wrote my own. + +### Name Origin +A *tatum* is a unit of time used in the computational analysis of music \[[1]\], named after Jazz pianist [Art Tatum](https://en.wikipedia.org/wiki/Art_Tatum). + +[1]: http://web.media.mit.edu/~tristan/phd/dissertation/chapter3.html#x1-390003.4.3 diff --git a/libs/EXTERNAL/libtatum/_config.yml b/libs/EXTERNAL/libtatum/_config.yml new file mode 100644 index 000000000..2f7efbeab --- /dev/null +++ b/libs/EXTERNAL/libtatum/_config.yml @@ -0,0 +1 @@ +theme: jekyll-theme-minimal \ No newline at end of file diff --git a/libs/EXTERNAL/libtatum/cmake/modules/FindTBB.cmake b/libs/EXTERNAL/libtatum/cmake/modules/FindTBB.cmake new file mode 100644 index 000000000..0a1a5bb77 --- /dev/null +++ b/libs/EXTERNAL/libtatum/cmake/modules/FindTBB.cmake @@ -0,0 +1,303 @@ +# The MIT License (MIT) +# +# Copyright (c) 2015 Justus Calvin +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# +# FindTBB +# ------- +# +# Find TBB include directories and libraries. +# +# Usage: +# +# find_package(TBB [major[.minor]] [EXACT] +# [QUIET] [REQUIRED] +# [[COMPONENTS] [components...]] +# [OPTIONAL_COMPONENTS components...]) +# +# where the allowed components are tbbmalloc and tbb_preview. Users may modify +# the behavior of this module with the following variables: +# +# * TBB_ROOT_DIR - The base directory the of TBB installation. +# * TBB_INCLUDE_DIR - The directory that contains the TBB headers files. +# * TBB_LIBRARY - The directory that contains the TBB library files. +# * TBB__LIBRARY - The path of the TBB the corresponding TBB library. +# These libraries, if specified, override the +# corresponding library search results, where +# may be tbb, tbb_debug, tbbmalloc, tbbmalloc_debug, +# tbb_preview, or tbb_preview_debug. +# * TBB_USE_DEBUG_BUILD - The debug version of tbb libraries, if present, will +# be used instead of the release version. +# +# Users may modify the behavior of this module with the following environment +# variables: +# +# * TBB_INSTALL_DIR +# * TBBROOT +# * LIBRARY_PATH +# +# This module will set the following variables: +# +# * TBB_FOUND - Set to false, or undefined, if we haven’t found, or +# don’t want to use TBB. +# * TBB__FOUND - If False, optional part of TBB sytem is +# not available. +# * TBB_VERSION - The full version string +# * TBB_VERSION_MAJOR - The major version +# * TBB_VERSION_MINOR - The minor version +# * TBB_INTERFACE_VERSION - The interface version number defined in +# tbb/tbb_stddef.h. +# * TBB__LIBRARY_RELEASE - The path of the TBB release version of +# , where may be tbb, tbb_debug, +# tbbmalloc, tbbmalloc_debug, tbb_preview, or +# tbb_preview_debug. +# * TBB__LIBRARY_DEGUG - The path of the TBB release version of +# , where may be tbb, tbb_debug, +# tbbmalloc, tbbmalloc_debug, tbb_preview, or +# tbb_preview_debug. +# +# The following varibles should be used to build and link with TBB: +# +# * TBB_INCLUDE_DIRS - The include directory for TBB. +# * TBB_LIBRARIES - The libraries to link against to use TBB. +# * TBB_LIBRARIES_RELEASE - The release libraries to link against to use TBB. +# * TBB_LIBRARIES_DEBUG - The debug libraries to link against to use TBB. +# * TBB_DEFINITIONS - Definitions to use when compiling code that uses +# TBB. +# * TBB_DEFINITIONS_RELEASE - Definitions to use when compiling release code that +# uses TBB. +# * TBB_DEFINITIONS_DEBUG - Definitions to use when compiling debug code that +# uses TBB. +# +# This module will also create the "tbb" target that may be used when building +# executables and libraries. 
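+# +# Example usage (a minimal sketch, not part of this module; the 'my_app' target and +# source file name are hypothetical): +# +#   list(APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/cmake/modules") +#   find_package(TBB COMPONENTS tbbmalloc) +#   if(TBB_FOUND) +#     add_executable(my_app main.cpp) +#     target_link_libraries(my_app tbb) +#   endif()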
+ +include(FindPackageHandleStandardArgs) + +if(NOT TBB_FOUND) + + ################################## + # Check the build type + ################################## + + if(NOT DEFINED TBB_USE_DEBUG_BUILD) + if(CMAKE_BUILD_TYPE MATCHES "(Debug|DEBUG|debug|RelWithDebInfo|RELWITHDEBINFO|relwithdebinfo)") + set(TBB_BUILD_TYPE DEBUG) + else() + set(TBB_BUILD_TYPE RELEASE) + endif() + elseif(TBB_USE_DEBUG_BUILD) + set(TBB_BUILD_TYPE DEBUG) + else() + set(TBB_BUILD_TYPE RELEASE) + endif() + + ################################## + # Set the TBB search directories + ################################## + + # Define search paths based on user input and environment variables + set(TBB_SEARCH_DIR ${TBB_ROOT_DIR} $ENV{TBB_INSTALL_DIR} $ENV{TBBROOT}) + + # Define the search directories based on the current platform + if(CMAKE_SYSTEM_NAME STREQUAL "Windows") + set(TBB_DEFAULT_SEARCH_DIR "C:/Program Files/Intel/TBB" + "C:/Program Files (x86)/Intel/TBB") + + # Set the target architecture + if(CMAKE_SIZEOF_VOID_P EQUAL 8) + set(TBB_ARCHITECTURE "intel64") + else() + set(TBB_ARCHITECTURE "ia32") + endif() + + # Set the TBB search library path search suffix based on the version of VC + if(WINDOWS_STORE) + set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc11_ui") + elseif(MSVC14) + set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc14") + elseif(MSVC12) + set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc12") + elseif(MSVC11) + set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc11") + elseif(MSVC10) + set(TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc10") + endif() + + # Add the library path search suffix for the VC independent version of TBB + list(APPEND TBB_LIB_PATH_SUFFIX "lib/${TBB_ARCHITECTURE}/vc_mt") + + elseif(CMAKE_SYSTEM_NAME STREQUAL "Darwin") + # OS X + set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb") + + # TODO: Check to see which C++ library is being used by the compiler. + if(NOT ${CMAKE_SYSTEM_VERSION} VERSION_LESS 13.0) + # The default C++ library on OS X 10.9 and later is libc++ + set(TBB_LIB_PATH_SUFFIX "lib/libc++" "lib") + else() + set(TBB_LIB_PATH_SUFFIX "lib") + endif() + elseif(CMAKE_SYSTEM_NAME STREQUAL "Linux") + # Linux + set(TBB_DEFAULT_SEARCH_DIR "/opt/intel/tbb") + + # TODO: Check compiler version to see the suffix should be /gcc4.1 or + # /gcc4.1. For now, assume that the compiler is more recent than + # gcc 4.4.x or later. 
+ if(CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64") + set(TBB_LIB_PATH_SUFFIX "lib/intel64/gcc4.4") + elseif(CMAKE_SYSTEM_PROCESSOR MATCHES "^i.86$") + set(TBB_LIB_PATH_SUFFIX "lib/ia32/gcc4.4") + endif() + endif() + + ################################## + # Find the TBB include dir + ################################## + + find_path(TBB_INCLUDE_DIRS tbb/tbb.h + HINTS ${TBB_INCLUDE_DIR} ${TBB_SEARCH_DIR} + PATHS ${TBB_DEFAULT_SEARCH_DIR} + PATH_SUFFIXES include) + + ################################## + # Set version strings + ################################## + + if(TBB_INCLUDE_DIRS) + file(READ "${TBB_INCLUDE_DIRS}/tbb/tbb_stddef.h" _tbb_version_file) + string(REGEX REPLACE ".*#define TBB_VERSION_MAJOR ([0-9]+).*" "\\1" + TBB_VERSION_MAJOR "${_tbb_version_file}") + string(REGEX REPLACE ".*#define TBB_VERSION_MINOR ([0-9]+).*" "\\1" + TBB_VERSION_MINOR "${_tbb_version_file}") + string(REGEX REPLACE ".*#define TBB_INTERFACE_VERSION ([0-9]+).*" "\\1" + TBB_INTERFACE_VERSION "${_tbb_version_file}") + set(TBB_VERSION "${TBB_VERSION_MAJOR}.${TBB_VERSION_MINOR}") + endif() + + ################################## + # Find TBB components + ################################## + + if(TBB_VERSION VERSION_LESS 4.3) + set(TBB_SEARCH_COMPOMPONENTS tbb_preview tbbmalloc tbb) + else() + set(TBB_SEARCH_COMPOMPONENTS tbb_preview tbbmalloc_proxy tbbmalloc tbb) + endif() + + # Find each component + foreach(_comp ${TBB_SEARCH_COMPOMPONENTS}) + if(";${TBB_FIND_COMPONENTS};tbb;" MATCHES ";${_comp};") + + # Search for the libraries + find_library(TBB_${_comp}_LIBRARY_RELEASE ${_comp} + HINTS ${TBB_LIBRARY} ${TBB_SEARCH_DIR} + PATHS ${TBB_DEFAULT_SEARCH_DIR} ENV LIBRARY_PATH + PATH_SUFFIXES ${TBB_LIB_PATH_SUFFIX}) + + find_library(TBB_${_comp}_LIBRARY_DEBUG ${_comp}_debug + HINTS ${TBB_LIBRARY} ${TBB_SEARCH_DIR} + PATHS ${TBB_DEFAULT_SEARCH_DIR} ENV LIBRARY_PATH + PATH_SUFFIXES ${TBB_LIB_PATH_SUFFIX}) + + if(TBB_${_comp}_LIBRARY_DEBUG) + list(APPEND TBB_LIBRARIES_DEBUG "${TBB_${_comp}_LIBRARY_DEBUG}") + endif() + if(TBB_${_comp}_LIBRARY_RELEASE) + list(APPEND TBB_LIBRARIES_RELEASE "${TBB_${_comp}_LIBRARY_RELEASE}") + endif() + if(TBB_${_comp}_LIBRARY_${TBB_BUILD_TYPE} AND NOT TBB_${_comp}_LIBRARY) + set(TBB_${_comp}_LIBRARY "${TBB_${_comp}_LIBRARY_${TBB_BUILD_TYPE}}") + endif() + + if(TBB_${_comp}_LIBRARY AND EXISTS "${TBB_${_comp}_LIBRARY}") + set(TBB_${_comp}_FOUND TRUE) + else() + set(TBB_${_comp}_FOUND FALSE) + endif() + + # Mark internal variables as advanced + mark_as_advanced(TBB_${_comp}_LIBRARY_RELEASE) + mark_as_advanced(TBB_${_comp}_LIBRARY_DEBUG) + mark_as_advanced(TBB_${_comp}_LIBRARY) + + endif() + endforeach() + + ################################## + # Set compile flags and libraries + ################################## + + set(TBB_DEFINITIONS_RELEASE "") + set(TBB_DEFINITIONS_DEBUG "-DTBB_USE_DEBUG=1") + + if(TBB_LIBRARIES_${TBB_BUILD_TYPE}) + set(TBB_DEFINITIONS "${TBB_DEFINITIONS_${TBB_BUILD_TYPE}}") + set(TBB_LIBRARIES "${TBB_LIBRARIES_${TBB_BUILD_TYPE}}") + elseif(TBB_LIBRARIES_RELEASE) + set(TBB_DEFINITIONS "${TBB_DEFINITIONS_RELEASE}") + set(TBB_LIBRARIES "${TBB_LIBRARIES_RELEASE}") + elseif(TBB_LIBRARIES_DEBUG) + set(TBB_DEFINITIONS "${TBB_DEFINITIONS_DEBUG}") + set(TBB_LIBRARIES "${TBB_LIBRARIES_DEBUG}") + endif() + + find_package_handle_standard_args(TBB + REQUIRED_VARS TBB_INCLUDE_DIRS TBB_LIBRARIES + HANDLE_COMPONENTS + VERSION_VAR TBB_VERSION) + + ################################## + # Create targets + ################################## + + if(NOT CMAKE_VERSION VERSION_LESS 3.0 AND TBB_FOUND) + 
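+ # Create an IMPORTED 'tbb' target so consumers can simply call target_link_libraries(<target> tbb); + # per-configuration locations (and the TBB_USE_DEBUG definition for debug configurations) are + # attached below when both release and debug libraries were found.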
add_library(tbb SHARED IMPORTED) + set_target_properties(tbb PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES ${TBB_INCLUDE_DIRS} + IMPORTED_LOCATION ${TBB_LIBRARIES}) + if(TBB_LIBRARIES_RELEASE AND TBB_LIBRARIES_DEBUG) + set_target_properties(tbb PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "$<$,$>:TBB_USE_DEBUG=1>" + IMPORTED_LOCATION_DEBUG ${TBB_LIBRARIES_DEBUG} + IMPORTED_LOCATION_RELWITHDEBINFO ${TBB_LIBRARIES_DEBUG} + IMPORTED_LOCATION_RELEASE ${TBB_LIBRARIES_RELEASE} + IMPORTED_LOCATION_MINSIZEREL ${TBB_LIBRARIES_RELEASE} + ) + elseif(TBB_LIBRARIES_RELEASE) + set_target_properties(tbb PROPERTIES IMPORTED_LOCATION ${TBB_LIBRARIES_RELEASE}) + else() + set_target_properties(tbb PROPERTIES + INTERFACE_COMPILE_DEFINITIONS "${TBB_DEFINITIONS_DEBUG}" + IMPORTED_LOCATION ${TBB_LIBRARIES_DEBUG} + ) + endif() + endif() + + mark_as_advanced(TBB_INCLUDE_DIRS TBB_LIBRARIES) + + unset(TBB_ARCHITECTURE) + unset(TBB_BUILD_TYPE) + unset(TBB_LIB_PATH_SUFFIX) + unset(TBB_DEFAULT_SEARCH_DIR) + +endif() diff --git a/libs/EXTERNAL/libtatum/doc/CMakeLists.txt b/libs/EXTERNAL/libtatum/doc/CMakeLists.txt new file mode 100644 index 000000000..6bd35155a --- /dev/null +++ b/libs/EXTERNAL/libtatum/doc/CMakeLists.txt @@ -0,0 +1,18 @@ +#find_package(Doxygen) + +#if(DOXYGEN_FOUND) + + #set(doxyfile_in ${CMAKE_CURRENT_SOURCE_DIR}/doxyfile.in) + #set(doxyfile ${CMAKE_CURRENT_BINARY_DIR}/doxyfile) + + #configure_file(${doxyfile_in} ${doxyfile} @ONLY) + + #add_custom_target(doc + #COMMAND ${DOXYGEN_EXECUTABLE} ${doxyfile} + #WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR} + #COMMENT "Generating API Documentation with Doxygen" + #VERBATIM) + +#else() + #message(INFO "Doxygen not found. Documentation will not be built") +#endif() diff --git a/libs/EXTERNAL/libtatum/doc/doxyfile.in b/libs/EXTERNAL/libtatum/doc/doxyfile.in new file mode 100644 index 000000000..44182bec8 --- /dev/null +++ b/libs/EXTERNAL/libtatum/doc/doxyfile.in @@ -0,0 +1,106 @@ +# Doxyfile 1.8.6 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a double hash (##) is considered a comment and is placed in +# front of the TAG it is preceding. +# +# All text after a single hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists, items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (\" \"). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all text +# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv +# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv +# for the list of possible encodings. +# The default value is: UTF-8. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by +# double-quotes, unless you are using Doxywizard) that should identify the +# project for which the documentation is generated. This name is used in the +# title of most generated pages and in a few other places. +# The default value is: My Project. + +PROJECT_NAME = "@CMAKE_PROJECT_NAME@" + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
This +# could be handy for archiving the generated documentation or if some version +# control system is used. + +PROJECT_NUMBER = + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer a +# quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = "A Fast, Flexible Static Timing Analysis (STA) Engine for Digital Circuits" + +# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path. +# Stripping is only done if one of the specified strings matches the left-hand +# part of the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the path to +# strip. +# +# Note that you can specify absolute paths here, but also relative paths, which +# will be relative from the directory where doxygen is started. +# This tag requires that the tag FULL_PATH_NAMES is set to YES. + +STRIP_FROM_PATH = @PROJECT_SOURCE_DIR@ @PROJECT_BINARY_DIR@ + +#--------------------------------------------------------------------------- +# Configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag is used to specify the files and/or directories that contain +# documented source files. You may enter file names like myfile.cpp or +# directories like /usr/src/myproject. Separate the files or directories with +# spaces. +# Note: If this tag is empty the current directory is searched. + +INPUT = @PROJECT_SOURCE_DIR@/src/libtatum @PROJECT_SOURCE_DIR@/README.md + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and +# *.h) to filter out the source-files in the directories. If left blank the +# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, +# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, +# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, +# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, +# *.qsf, *.as and *.js. + +FILE_PATTERNS = *.h *.hpp *.c *.cpp *.tpp *.inl + +# The RECURSIVE tag can be used to specify whether or not subdirectories should +# be searched for input files as well. +# The default value is: NO. + +RECURSIVE = YES + +# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that +# is part of the input, its contents will be placed on the main page +# (index.html). This can be useful if you have a project on for instance GitHub +# and want to reuse the introduction page also for the doxygen output. + +USE_MDFILE_AS_MAINPAGE = README.md + +# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see +# http://www.mathjax.org) which uses client side Javascript for the rendering +# instead of using prerendered bitmaps. Use this if you do not have LaTeX +# installed or if you want to formulas look prettier in the HTML output. When +# enabled you may also need to install MathJax separately and configure the path +# to it using the MATHJAX_RELPATH option. +# The default value is: NO. +# This tag requires that the tag GENERATE_HTML is set to YES. 
+ +USE_MATHJAX = YES diff --git a/libs/EXTERNAL/libtatum/libtatum/CMakeLists.txt b/libs/EXTERNAL/libtatum/libtatum/CMakeLists.txt new file mode 100644 index 000000000..64fd60241 --- /dev/null +++ b/libs/EXTERNAL/libtatum/libtatum/CMakeLists.txt @@ -0,0 +1,87 @@ +project("libtatum") + + +# +# +#Check for parallel execution framework support +# +# +set(TBB_SUPPORTED FALSE) + +#Check for Thread Building Blocks support +find_package(TBB) + +if (TBB_FOUND) + set(TBB_SUPPORTED TRUE) +endif() + +# +# +# Determine parallel execution framework +# +# +set(TATUM_USE_EXECUTION_ENGINE "") #The actual execution engine to use (based on what is available) + +if (TATUM_EXECUTION_ENGINE STREQUAL "auto") + #Pick the best supported execution engine + if (TBB_SUPPORTED) + set(TATUM_USE_EXECUTION_ENGINE "tbb") + else() + set(TATUM_USE_EXECUTION_ENGINE "serial") + endif() +else() + #The user requested a specific execution engine + if (TATUM_EXECUTION_ENGINE STREQUAL "tbb") + if (NOT TBB_SUPPORTED) + message(FATAL_ERROR "Tatum: Requested execution engine '${TATUM_EXECUTION_ENGINE}' not found") + endif() + elseif (TATUM_EXECUTION_ENGINE STREQUAL "serial") + #Pass + else() + message(FATAL_ERROR "Tatum: Unrecognized execution engine '${TATUM_EXECUTION_ENGINE}'") + endif() + #Set the engine to use (it must be valid or we would have errored out) + set(TATUM_USE_EXECUTION_ENGINE "${TATUM_EXECUTION_ENGINE}") +endif() + +# +# +# Build files configuration +# +# + +#Source files for the library +file(GLOB_RECURSE LIB_TATUM_SOURCES *.cpp) +file(GLOB_RECURSE LIB_TATUM_HEADERS *.hpp) + +#Include directories +set(LIB_TATUM_INCLUDE_DIRS ${CMAKE_CURRENT_SOURCE_DIR}) + +# +# +# Define the actual build targets +# +# + +#Define the library +add_library(libtatum STATIC ${LIB_TATUM_SOURCES} ${LIB_TATUM_HEADERS}) +set_target_properties(libtatum PROPERTIES PREFIX "") #Avoid extra 'lib' prefix + +#Export library headers +target_include_directories(libtatum PUBLIC ${LIB_TATUM_INCLUDE_DIRS}) + +#Setup parallel execution +if (TATUM_USE_EXECUTION_ENGINE STREQUAL "tbb") + message(STATUS "Tatum: will support parallel execution using '${TATUM_USE_EXECUTION_ENGINE}'") + + target_compile_definitions(libtatum PUBLIC TATUM_USE_TBB) + target_link_libraries(libtatum tbb) + target_link_libraries(libtatum tbbmalloc_proxy) #Use the scalable memory allocator + +elseif (TATUM_USE_EXECUTION_ENGINE STREQUAL "serial") + #Nothing to do + message(STATUS "Tatum: will support only serial execution") +else() + message(FATAL_ERROR "Tatum: Unrecognized concrete execution engine '${TATUM_USE_EXECUTION_ENGINE}'") +endif() + diff --git a/libs/EXTERNAL/libtatum/libtatum/tatum.hpp b/libs/EXTERNAL/libtatum/libtatum/tatum.hpp new file mode 100644 index 000000000..d80f97f56 --- /dev/null +++ b/libs/EXTERNAL/libtatum/libtatum/tatum.hpp @@ -0,0 +1,18 @@ +#ifndef TATUM_HPP +#define TATUM_HPP + +#include "tatum_fwd.hpp" + +//Data structures +#include "tatum/TimingGraph.hpp" +#include "tatum/TimingConstraints.hpp" + +//Analyzers +#include "tatum/timing_analyzers.hpp" +#include "tatum/graph_walkers.hpp" +#include "tatum/analyzer_factory.hpp" + +//Reporting +#include "tatum/TimingReporter.hpp" + +#endif diff --git a/libs/EXTERNAL/libtatum/libtatum/tatum/HoldAnalysis.hpp b/libs/EXTERNAL/libtatum/libtatum/tatum/HoldAnalysis.hpp new file mode 100644 index 000000000..1dc44a1f0 --- /dev/null +++ b/libs/EXTERNAL/libtatum/libtatum/tatum/HoldAnalysis.hpp @@ -0,0 +1,72 @@ +#pragma once +#include +#include + +#include "tatum/tags/TimingTags.hpp" +#include "tatum/TimingGraph.hpp" +#include 
"tatum/TimingConstraints.hpp" + +#include "tatum/graph_visitors/CommonAnalysisVisitor.hpp" +#include "tatum/graph_visitors/HoldAnalysisOps.hpp" + +#include "tatum/util/tatum_assert.hpp" + + +namespace tatum { + +/** \file + * The 'HoldAnalysis' class defines the operations needed by a timing analyzer class + * to perform a hold (min/shortest path) analysis. + * + * \see SetupAnalysis + * \see TimingAnalyzer + * + * Hold Analysis Principles + * ========================== + * + * In addition to satisfying setup constraints, data arriving at a Flip-Flop (FF) must stay (i.e. + * remain stable) for some amount of time *after* the capturing clock edge arrives. This time is + * referred to as the 'Hold Time' of the Flip-Flop, \f$ t_h \f$. If the data changes during the + * hold window (i.e. less than \f$ t_h \f$ after the capturing clock edge) then the FF may go + * meta-stable failing to capture the data. This will put the circuit in an invalid state (this + * is bad). + * + * More formally, for correct operation at every cycle we require the following to be satisfied + * for every path in the circuit: + * + * \f[ + * t_{clk\_insrt}^{(launch)} + t_{cq}^{(min)} + t_{comb}^{(min)} \geq t_{clk\_insrt}^{(capture)} + t_h (1) + * \f] + * + * where \f$ t_{clk\_insrt}^{(launch)}, t_{clk\_insrt}^{(capture)} \f$ are the up/downstream FF clock insertion + * delays, \f$ t_{cq}^{(min)} \f$ is the minimum clock-to-q delay of the upstream FF, \f$ t_{comb}^{(min)} \f$ is + * the minimum combinational path delay from the upstream to downstream FFs, and \f$ t_h \f$ is the hold + * constraint of the downstream FF. + * + * Note that unlike in setup analysis this behaviour is indepenant of clock period. + * Intuitively, hold analysis can be viewed as data from the upstream FF trampling the data launched + * on the previous cycle before it can be captured by the downstream FF. + */ + +/** \class HoldAnalysis + * + * The 'HoldAnalysis' class defines the operations needed by a timing analyzer + * to perform a hold (min/shortest path) analysis. + * + * \see SetupAnalysis + * \see TimingAnalyzer + * \see CommonAnalysisVisitor + */ +class HoldAnalysis : public detail::CommonAnalysisVisitor { + + public: + HoldAnalysis(size_t num_tags, size_t num_slacks) + : detail::CommonAnalysisVisitor(num_tags, num_slacks) {} + + TimingTags::tag_range hold_tags(const NodeId node) const { return ops_.get_tags(node); } + TimingTags::tag_range hold_tags(const NodeId node, TagType type) const { return ops_.get_tags(node, type); } + TimingTags::tag_range hold_edge_slacks(const EdgeId edge) const { return ops_.get_edge_slacks(edge); } + TimingTags::tag_range hold_node_slacks(const NodeId node) const { return ops_.get_node_slacks(node); } +}; + +} //namepsace diff --git a/libs/EXTERNAL/libtatum/libtatum/tatum/SetupAnalysis.hpp b/libs/EXTERNAL/libtatum/libtatum/tatum/SetupAnalysis.hpp new file mode 100644 index 000000000..28a1282da --- /dev/null +++ b/libs/EXTERNAL/libtatum/libtatum/tatum/SetupAnalysis.hpp @@ -0,0 +1,139 @@ +#pragma once +#include +#include + +#include "tatum/TimingGraph.hpp" +#include "tatum/TimingConstraints.hpp" +#include "tatum/tags/TimingTags.hpp" + +#include "tatum/graph_visitors/CommonAnalysisVisitor.hpp" +#include "tatum/graph_visitors/SetupAnalysisOps.hpp" + +#include "tatum/util/tatum_assert.hpp" + + +namespace tatum { + +/** \file + * The 'SetupAnalysis' class defines the operations needed by a GraphWalker class + * to perform a setup (max/longest path) analysis. 
It satisifes and extends the GraphVisitor + * concept class. + * + * Setup Analysis Principles + * ========================== + * To operate correctly data arriving at a Flip-Flop (FF) must arrive (i.e. be stable) some + * amount of time BEFORE the capturing clock edge. This time is referred to as the + * 'Setup Time' of the Flip-Flop. If the data arrives during the setup window + * (i.e. less than \f$ t_s \f$ before the capturing clock edge) then the FF may go meta-stable + * failing to capture the data. This will put the circuit in an invalid state (this is bad). + * + * More formally, for correct operation at every cycle we require the following to be satisfied + * for every path in the circuit: + * + * \f[ + * t_{clock}^{(launch)} + t_{cq}^{(max)} + t_{comb}^{(max)} \leq t_{clock}^{(capture)} - t_s (1) + * \f] + * + * where \f$ t_{clock}^{(launch)} \f$ is the clock arrival time at the upstream FF, \f$ t_{cq}^{(max)} \f$ is the + * maximum clock-to-q delay of the upstream FF, and \f$ t_{comb}^{(max)} \f$ is the maximum combinational + * path delay from the upstream to downstream FFs, \f$ t_s \f$ is the setup constraint of the downstream + * FF, and \f$ t_{clock}^{(capture)} \f$ is the clock arrival time at the downstream FF. + * + * Typically \f$ t_{clock}^{(launch)} \f$ and \f$ t_{clock}^{(capture)} \f$ have a periodic relationship. + * To ensure a non-optimistic analysis we need to consider the minimum possible time difference between + * \f$ t_{clock}^{(capture)} \f$ and \f$ t_{clock}^{(launch)} \f$. In the case where the launch and capture clocks + * are the same this *constraint* (\f$ T_{cstr} \f$) value is simply the clock period (\f$ T_{clk} \f$); however, + * in multi-clock scenarios the closest alignment of clock edges is used, which may be smaller than the clock + * period of either the launch or capture clock (depending on their period and phase relationship). It is + * typically assumed that the launch clock arrives at time zero (even if this is not strictly true + * in an absolute sense, such as if the clock has a rise time > 0, we can achieve this by adjusting + * the value of \f$ T_{cstr} \f$). + * + * Additionally, the arrival times of the launch and capture edges are unlikely to be perfectly + * aligned in practise, due to clock skew. + * + * Formally, we can re-write our condition for correct operation as: + * \f[ + * t_{clk\_insrt}^{(launch)} + t_{cq}^{(max)} + t_{comb}^{(max)} \leq t_{clk\_insrt}^{(capture)} - t_s + T_{cstr} (2) + * \f] + * + * where \f$ t_{clk\_insrt}^{(launch)} \f$ and \f$ t_{clk\_insrt}^{(capture)} \f$ represent the clock insertion delays + * to the launch/capture nodes, and \f$ T_{cstr} \f$ the ideal constraint (excluding skew). + * + * We refer to the left hand side of (2) as the 'arrival time' (when the data actually arrives at a FF capture node), + * and the right hand side as the 'required time' (when the data is required to arrive for correct operation), so + * (2) becomes: + * \f[ + * t_{arr}^{(max)} \leq t_{req}^{(min)} (3) + * \f] + */ + + /** + * Setup Analysis Implementation + * =============================== + * When we perform setup analysis we follow the formulation of (2), by performing two key operations: traversing + * the clock network, and traversing the data paths. + * + * Clock Propogation + * ------------------- + * We traverse the clock network to determine the clock delays (\f$ t_{clk\_insrt}^{(launch)} \f$, \f$ t_{clk\_insrt}^{(capture)} \f$) + * at each FF clock pin (FF_CLOCK node in the timing graph). 
Clock-related delay information is stored and + * propagated as sets of 'clock tags'. + * + * Data Propagation + * ------------------ + * We traverse the data paths in the circuit to determine \f$ t_{arr}^{(max)} \f$ in (2). + * In particular, at each node in the circuit we track the maximum arrival time to it as a set + * of 'data_tags'. + * + * The timing graph uses separate nodes to represent FF Pins (FF_IPIN, FF_OPIN) and FF Sources/Sinks + * (FF_SOURCE/FF_SINK). As a result \f$ t_{cq} \f$ delays are actually placed on the edges between FF_SOURCEs + * and FF_OPINs, and \f$ t_s \f$ values are similarly placed as edge delays between FF_IPINs and FF_SINKs. + * + * The data launch nodes (e.g. FF_SOURCEs) have their arrival times initialized to the clock insertion + * delay (\f$ t_{clk\_insrt}^{(launch)} \f$). Then at each downstream node we store the maximum of the upstream + * arrival time plus the incoming edge delay as the arrival time at each node. As a result the final + * arrival time at a capture node (e.g. FF_SINK) is the maximum arrival time (\f$ t_{arr}^{(max)} \f$). + * + * + * The required times at sink nodes (Primary Outputs, e.g. FF_SINKs) can be calculated directly after clock propagation, + * since the value of \f$ T_{cstr} \f$ is determined ahead of time. + * + * To facilitate the calculation of slack at each node we also propagate required times back through + * the timing graph. This follows a similar procedure to arrival propagation but happens in reverse + * order (from POs to PIs), with each node taking the minimum of the downstream required time minus + * the edge delay. + * + * Combined Clock & Data Propagation + * ----------------------------------- + * In practice the clock and data propagation, although sometimes logically useful to think of as separate, + * are combined into a single traversal for efficiency (minimizing graph walks). This is enabled by + * building the timing graph with edges between FF_CLOCK and FF_SINK/FF_SOURCE nodes. On the forward traversal + * we propagate clock tags from known clock sources, which are converted to data tags (with appropriate + * *arrival times*) at FF_SOURCE nodes, and data tags (with appropriate *required times*) at FF_SINK nodes. + * + * \see HoldAnalysis + */ + +/** \class SetupAnalysis + * + * The 'SetupAnalysis' class defines the operations needed by a timing analyzer + * to perform a setup (max/longest path) analysis.
+ * + * \see HoldAnalysis + * \see TimingAnalyzer + * \see CommonAnalysisVisitor + */ +class SetupAnalysis : public detail::CommonAnalysisVisitor { + + public: + SetupAnalysis(size_t num_tags, size_t num_slacks) + : detail::CommonAnalysisVisitor(num_tags, num_slacks) {} + + TimingTags::tag_range setup_tags(const NodeId node) const { return ops_.get_tags(node); } + TimingTags::tag_range setup_tags(const NodeId node, TagType type) const { return ops_.get_tags(node, type); } + TimingTags::tag_range setup_edge_slacks(const EdgeId edge) const { return ops_.get_edge_slacks(edge); } + TimingTags::tag_range setup_node_slacks(const NodeId node) const { return ops_.get_node_slacks(node); } +}; + +} //namespace diff --git a/libs/EXTERNAL/libtatum/libtatum/tatum/SetupHoldAnalysis.hpp b/libs/EXTERNAL/libtatum/libtatum/tatum/SetupHoldAnalysis.hpp new file mode 100644 index 000000000..e9eb39b12 --- /dev/null +++ b/libs/EXTERNAL/libtatum/libtatum/tatum/SetupHoldAnalysis.hpp @@ -0,0 +1,82 @@ +#pragma once +#include "SetupAnalysis.hpp" +#include "HoldAnalysis.hpp" +#include "tatum/delay_calc/DelayCalculator.hpp" +#include "tatum/graph_visitors/GraphVisitor.hpp" + +namespace tatum { + +/** \class SetupHoldAnalysis + * + * The SetupHoldAnalysis class defines the operations needed by a timing analyzer + * to perform a combined setup (max/longest path) and hold (min/shortest path) analysis. + * + * Performing both analyses simultaneously tends to be more efficient than performing + * them separately due to cache locality. + * + * \see SetupAnalysis + * \see HoldAnalysis + * \see TimingAnalyzer + */ +class SetupHoldAnalysis : public GraphVisitor { + public: + SetupHoldAnalysis(size_t num_tags, size_t num_slacks) + : setup_visitor_(num_tags, num_slacks) + , hold_visitor_(num_tags, num_slacks) {} + + void do_reset_node(const NodeId node_id) override { + setup_visitor_.do_reset_node(node_id); + hold_visitor_.do_reset_node(node_id); + } + void do_reset_edge(const EdgeId edge_id) override { + setup_visitor_.do_reset_edge(edge_id); + hold_visitor_.do_reset_edge(edge_id); + } + + bool do_arrival_pre_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const NodeId node_id) override { + bool setup_unconstrained = setup_visitor_.do_arrival_pre_traverse_node(tg, tc, node_id); + bool hold_unconstrained = hold_visitor_.do_arrival_pre_traverse_node(tg, tc, node_id); + + return setup_unconstrained || hold_unconstrained; + } + + bool do_required_pre_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const NodeId node_id) override { + bool setup_unconstrained = setup_visitor_.do_required_pre_traverse_node(tg, tc, node_id); + bool hold_unconstrained = hold_visitor_.do_required_pre_traverse_node(tg, tc, node_id); + + return setup_unconstrained || hold_unconstrained; + } + + void do_arrival_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id) override { + setup_visitor_.do_arrival_traverse_node(tg, tc, dc, node_id); + hold_visitor_.do_arrival_traverse_node(tg, tc, dc, node_id); + } + + void do_required_traverse_node(const TimingGraph& tg, const TimingConstraints& tc, const DelayCalculator& dc, const NodeId node_id) override { + setup_visitor_.do_required_traverse_node(tg, tc, dc, node_id); + hold_visitor_.do_required_traverse_node(tg, tc, dc, node_id); + } + + void do_slack_traverse_node(const TimingGraph& tg, const DelayCalculator& dc, const NodeId node) override { + setup_visitor_.do_slack_traverse_node(tg, dc, node); +
hold_visitor_.do_slack_traverse_node(tg, dc, node); + } + + TimingTags::tag_range setup_tags(const NodeId node_id) const { return setup_visitor_.setup_tags(node_id); } + TimingTags::tag_range setup_tags(const NodeId node_id, TagType type) const { return setup_visitor_.setup_tags(node_id, type); } + TimingTags::tag_range setup_edge_slacks(const EdgeId edge_id) const { return setup_visitor_.setup_edge_slacks(edge_id); } + TimingTags::tag_range setup_node_slacks(const NodeId node_id) const { return setup_visitor_.setup_node_slacks(node_id); } + + TimingTags::tag_range hold_tags(const NodeId node_id) const { return hold_visitor_.hold_tags(node_id); } + TimingTags::tag_range hold_tags(const NodeId node_id, TagType type) const { return hold_visitor_.hold_tags(node_id, type); } + TimingTags::tag_range hold_edge_slacks(const EdgeId edge_id) const { return hold_visitor_.hold_edge_slacks(edge_id); } + TimingTags::tag_range hold_node_slacks(const NodeId node_id) const { return hold_visitor_.hold_node_slacks(node_id); } + + SetupAnalysis& setup_visitor() { return setup_visitor_; } + HoldAnalysis& hold_visitor() { return hold_visitor_; } + private: + SetupAnalysis setup_visitor_; + HoldAnalysis hold_visitor_; +}; + +} //namepsace diff --git a/libs/EXTERNAL/libtatum/libtatum/tatum/Time.hpp b/libs/EXTERNAL/libtatum/libtatum/tatum/Time.hpp new file mode 100644 index 000000000..1ee7261bd --- /dev/null +++ b/libs/EXTERNAL/libtatum/libtatum/tatum/Time.hpp @@ -0,0 +1,79 @@ +#pragma once + +#include +#include +#include + +#ifndef TIME_VEC_WIDTH +#define TIME_VEC_WIDTH 1 +#endif + +/* + * What alignment is required? + */ +#if TIME_VEC_WIDTH > 8 +//Required for aligned access with AVX +# define TIME_MEM_ALIGN 8*sizeof(float) + +#elif TIME_VEC_WIDTH >= 4 +//Required for aligned access with SSE +# define TIME_MEM_ALIGN 4*sizeof(float) + +#endif //TIME_VEC_WIDTH + +#if TIME_VEC_WIDTH > 1 +# include +#endif + +namespace tatum { + +class Time { + public: + typedef float scalar_type; + public: //Constructors + Time(): Time(NAN) {} + + ///Initialize from float types + explicit Time(const double time) { set_value(time); } + + public: //Accessors + ///The current time value + scalar_type value() const; + + ///Indicates whether the current time value is valid + bool valid() const; + + ///Updates the time value with the max of itself and other + void max(const Time& other); + ///Updates the time value with the min of itself and other + void min(const Time& other); + + ///Allow conversions to scalar_type (usually float) + operator scalar_type() const { return value(); } + + public: //Mutators + ///Set the current time value to time + void set_value(scalar_type time); + + + Time& operator+=(const Time& rhs); + Time& operator-=(const Time& rhs); + + friend bool operator==(const Time lhs, const Time rhs); + friend bool operator<(const Time lhs, const Time rhs); + friend bool operator>(const Time lhs, const Time rhs); + friend Time operator-(const Time val); + friend Time operator+(const Time val); + + private: +#if TIME_VEC_WIDTH > 1 + alignas(TIME_MEM_ALIGN) std::array time_; +#else + scalar_type time_; +#endif + +}; + +} //namepsace + +#include "Time.inl" diff --git a/libs/EXTERNAL/libtatum/libtatum/tatum/Time.inl b/libs/EXTERNAL/libtatum/libtatum/tatum/Time.inl new file mode 100644 index 000000000..20e320953 --- /dev/null +++ b/libs/EXTERNAL/libtatum/libtatum/tatum/Time.inl @@ -0,0 +1,128 @@ +#include +#include + +#include "Time.hpp" + +namespace tatum { + +/* + * Class members + */ + +#if TIME_VEC_WIDTH > 1 + /* + * Serial 
/ inferred SIMD + */ + inline void Time::set_value(scalar_type time) { + for(size_t i = 0; i < time_.size(); i++) { + time_[i] = time; + } + } + + inline void Time::max(const Time& other) { + for(size_t i = 0; i < time_.size(); i++) { + //Use conditional so compiler will vectorize + time_[i] = (time_[i] > other.time_[i]) ? time_[i] : other.time_[i]; + } + } + + inline void Time::min(const Time& other) { + for(size_t i = 0; i < time_.size(); i++) { + //Use conditional so compiler will vectorize + time_[i] = (time_[i] < other.time_[i]) ? time_[i] : other.time_[i]; + } + } + + inline Time& Time::operator+=(const Time& rhs) { + for(size_t i = 0; i < time_.size(); i++) { + time_[i] += rhs.time_[i]; + } + + return *this; + } + + inline Time& Time::operator-=(const Time& rhs) { + for(size_t i = 0; i < time_.size(); i++) { + time_[i] -= rhs.time_[i]; + } + + return *this; + } + + inline Time::scalar_type Time::value() const { return time_[0]; } + + inline bool Time::valid() const { + //This is a reduction with a function call inside, + //so we can't vectorize easily + bool result = true; + for(size_t i = 0; i < time_.size(); i++) { + result &= !std::isnan(time_[i]); + } + return result; + } +#else //Scalar case (TIME_VEC_WIDTH == 1) + inline Time::scalar_type Time::value() const { return time_; } + inline void Time::set_value(scalar_type time) { time_ = time; } + inline bool Time::valid() const { return !std::isnan(time_); } + + inline void Time::max(const Time& other) { time_ = std::max(time_, other.time_); } + inline void Time::min(const Time& other) { time_ = std::min(time_, other.time_); } + inline Time& Time::operator+=(const Time& rhs) { time_ += rhs.time_; return *this; } + inline Time& Time::operator-=(const Time& rhs) { time_ -= rhs.time_; return *this; } + +#endif //TIME_VEC_WIDTH + +/* + * External functions + */ + +#if TIME_VEC_WIDTH > 1 +inline Time operator-(Time in) { + //Free function: iterate over the argument's elements + for(size_t i = 0; i < in.time_.size(); i++) { + in.time_[i] = -in.time_[i]; + } + return in; +} +inline Time operator+(Time in) { + for(size_t i = 0; i < in.time_.size(); i++) { + in.time_[i] = +in.time_[i]; + } + return in; +} +#else //Scalar case (TIME_VEC_WIDTH == 1) +inline bool operator==(const Time lhs, const Time rhs) { + return lhs.time_ == rhs.time_; +} + +inline bool operator<(const Time lhs, const Time rhs) { + return lhs.time_ < rhs.time_; +} + +inline bool operator>(const Time lhs, const Time rhs) { + return lhs.time_ > rhs.time_; +} + +inline Time operator-(Time in) { + in.time_ = -in.time_; + return in; +} +inline Time operator+(Time in) { + in.time_ = +in.time_; + return in; +} +#endif //TIME_VEC_WIDTH + +inline Time operator+(Time lhs, const Time& rhs) { + return lhs += rhs; +} + +inline Time operator-(Time lhs, const Time& rhs) { + return lhs -= rhs; +} + +inline std::ostream& operator<<(std::ostream& os, const Time& time) { + os << time.value(); + return os; +} + +} //namespace diff --git a/libs/EXTERNAL/libtatum/libtatum/tatum/TimingConstraints.cpp b/libs/EXTERNAL/libtatum/libtatum/tatum/TimingConstraints.cpp new file mode 100644 index 000000000..510fadd9e --- /dev/null +++ b/libs/EXTERNAL/libtatum/libtatum/tatum/TimingConstraints.cpp @@ -0,0 +1,571 @@ +#include +#include + +#include "tatum/util/tatum_assert.hpp" +#include "tatum/TimingConstraints.hpp" + +using std::cout; +using std::endl; + +namespace tatum { + +TimingConstraints::domain_range TimingConstraints::clock_domains() const { + return tatum::util::make_range(domain_ids_.begin(), domain_ids_.end()); +} + +std::string
TimingConstraints::clock_domain_name(const DomainId id) const { + if(!id) { + return std::string("*"); + } + return domain_names_[id]; +} + +NodeId TimingConstraints::clock_domain_source_node(const DomainId id) const { + return domain_sources_[id]; +} + +bool TimingConstraints::is_virtual_clock(const DomainId id) const { + //No source node indicates a virtual clock + return !bool(clock_domain_source_node(id)); +} + +DomainId TimingConstraints::node_clock_domain(const NodeId id) const { + //This is currenlty a linear search through all clock sources and + //I/O constraints, could be made more efficient but it is only called + //rarely (i.e. during pre-traversals) + + //Is it a clock source? + DomainId source_domain = find_node_source_clock_domain(id); + if(source_domain) return source_domain; + + //Does it have an input constarint? + for(DelayType delay_type : {DelayType::MAX, DelayType::MIN}) { + for(auto kv : input_constraints(delay_type)) { + auto node_id = kv.first; + auto domain_id = kv.second.domain; + + //TODO: Assumes a single clock per node + if(node_id == id) return domain_id; + } + } + + //Does it have an output constraint? + for(DelayType delay_type : {DelayType::MAX, DelayType::MIN}) { + for(auto kv : output_constraints(delay_type)) { + auto node_id = kv.first; + auto domain_id = kv.second.domain; + + //TODO: Assumes a single clock per node + if(node_id == id) return domain_id; + } + } + + //None found + return DomainId::INVALID(); +} + +bool TimingConstraints::node_is_clock_source(const NodeId id) const { + //Returns a DomainId which converts to true if valid + return bool(find_node_source_clock_domain(id)); +} + +bool TimingConstraints::node_is_constant_generator(const NodeId id) const { + return constant_generators_.count(id); +} + +DomainId TimingConstraints::find_node_source_clock_domain(const NodeId node_id) const { + //We don't expect many clocks, so the linear search should be fine + for(auto domain_id : clock_domains()) { + if(clock_domain_source_node(domain_id) == node_id) { + return domain_id; + } + } + + return DomainId::INVALID(); +} + +DomainId TimingConstraints::find_clock_domain(const std::string& name) const { + //Linear search for name + // We don't expect a large number of domains + for(DomainId id : clock_domains()) { + if(clock_domain_name(id) == name) { + return id; + } + } + + //Not found + return DomainId::INVALID(); +} + +bool TimingConstraints::should_analyze(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node) const { + TATUM_ASSERT(src_domain); + TATUM_ASSERT(sink_domain); + + //If there is a domain pair + capture node or domain pair constraint then it should be analyzed + return setup_constraints_.count(NodeDomainPair(src_domain, sink_domain, capture_node)) + || setup_constraints_.count(NodeDomainPair(src_domain, sink_domain, NodeId::INVALID())) + || hold_constraints_.count(NodeDomainPair(src_domain, sink_domain, capture_node)) + || hold_constraints_.count(NodeDomainPair(src_domain, sink_domain, NodeId::INVALID())); +} + +Time TimingConstraints::hold_constraint(const DomainId src_domain, const DomainId sink_domain, const NodeId capture_node) const { + //Try to find the capture node-specific constraint + auto iter = hold_constraints_.find(NodeDomainPair(src_domain, sink_domain, capture_node)); + if(iter != hold_constraints_.end()) { + return iter->second; + } + + //If no capture node specific constraint was found, fallback to the domain pair constriant + iter = hold_constraints_.find(NodeDomainPair(src_domain, 
sink_domain, NodeId::INVALID())); + if(iter != hold_constraints_.end()) { + return iter->second; + } + + //No constraint found + return std::numeric_limits