Merge pull request #908 from lnis-uofu/pyformat

Python code is now formatted and checked on CI.

commit 4d23f547ea
@@ -21,6 +21,8 @@ jobs:
           code_type: "-cpp"
         - name: "XML"
           code_type: "-xml"
+        - name: "Python"
+          code_type: "-py"
     steps:
     - name: Cancel previous
       uses: styfle/cancel-workflow-action@0.9.1

@@ -31,11 +33,14 @@ jobs:
       uses: actions/checkout@v2

     - name: Install dependencies
-      run: sudo bash ./.github/workflows/install_dependencies_build.sh
+      run: |
+        sudo bash ./.github/workflows/install_dependencies_build.sh
+        sudo python3 -m pip install -r requirements.txt

     - name: Dump tool versions
       run: |
        clang-format-10 --version
+        black --version

     - name: Check format
       run: ./dev/check-format.sh ${{ matrix.config.code_type }}
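The new CI leg invokes the repository's check script with -py; the same black check can be approximated locally with a small sketch like the one below (an assumed equivalent, not the CI's exact command: the real arguments live in dev/check-format.sh, and black is assumed installed via requirements.txt).

    # Sketch of the Python-format check the new CI matrix entry performs.
    import subprocess
    import sys

    result = subprocess.run(
        ["black", "--check", "--line-length", "100", "openfpga_flow/scripts"],
        capture_output=True,
        universal_newlines=True,
    )
    print(result.stdout or result.stderr)
    sys.exit(result.returncode)  # non-zero when any file would be reformatted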
Makefile (11 changes)

@@ -43,6 +43,7 @@ endif
 PYTHON_EXEC ?= python3
 CLANG_FORMAT_EXEC ?= clang-format-10
 XML_FORMAT_EXEC ?= xmllint
+PYTHON_FORMAT_EXEC ?= black

 # Put it first so that "make" without argument is like "make help".
 export COMMENT_EXTRACT

@@ -83,6 +84,16 @@ format-xml:
 		XMLLINT_INDENT=" " && ${XML_FORMAT_EXEC} --format $${f} --output $${f} || exit 1; \
 	done

+format-py:
+# Format all the python scripts under this project, excluding submodules
+	for f in `find openfpga_flow/scripts -iname *.py`; \
+	do \
+		${PYTHON_FORMAT_EXEC} $${f} --line-length 100 || exit 1; \
+	done
+
+format-all: format-cpp format-xml format-py
+# Format all the C/C++, XML and Python codes
+
 clean:
 # Remove current build results
 	rm -rf ${BUILD_DIR} yosys/install
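The new format-py target is a thin loop over black. A rough Python equivalent of that loop, for contributors who prefer not to use make (a sketch; note the Makefile's find -iname also matches mixed-case extensions, which rglob does not):

    # Approximate Python rendering of the format-py Makefile target.
    import pathlib
    import subprocess

    for f in pathlib.Path("openfpga_flow/scripts").rglob("*.py"):
        # Rewrites each script in place at the project's line length of 100.
        subprocess.run(["black", str(f), "--line-length", "100"], check=True)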
arch_file_updater.py

@@ -1,6 +1,6 @@
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Script Name : arch_file_updater.py
-# Description : This script designed to update architecture files
+# Description : This script designed to update architecture files
 #               from a lower version to higher version
 # Author : Xifan Tang
 # Email : xifan@osfpga.org

@@ -19,17 +19,12 @@ import re
 #####################################################################
 # Error codes
 #####################################################################
-error_codes = {
-    "SUCCESS": 0,
-    "ERROR": 1,
-    "OPTION_ERROR": 2,
-    "FILE_ERROR": 3
-}
+error_codes = {"SUCCESS": 0, "ERROR": 1, "OPTION_ERROR": 2, "FILE_ERROR": 3}
 
 #####################################################################
-# Initialize logger
+# Initialize logger
 #####################################################################
-logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO);
+logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
 
 #####################################################################
 # Upgrade an architecture XML file from version 1.1 syntax to version 1.2

@@ -41,77 +36,86 @@ logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO);
 # - The attribute 'capacity' of parent <tile> is removed
 #####################################################################
 def convert_arch_xml_from_v1p1_to_v1p2(input_fname, output_fname):
-  # Constants
-  TILE_ROOT_TAG = "tiles"
-  TILE_NODE_TAG = "tile"
-  SUB_TILE_NODE_TAG = "sub_tile"
-  NAME_TAG = "name"
-  CAPACITY_TAG = "capacity"
+    # Constants
+    TILE_ROOT_TAG = "tiles"
+    TILE_NODE_TAG = "tile"
+    SUB_TILE_NODE_TAG = "sub_tile"
+    NAME_TAG = "name"
+    CAPACITY_TAG = "capacity"
 
-  # Log runtime and status
-  status = error_codes["SUCCESS"]
-  start_time = time.time()
+    # Log runtime and status
+    status = error_codes["SUCCESS"]
+    start_time = time.time()
 
-  log_str = "Converting \'" + input_fname + "\'" + " to " + "\'" + output_fname + "\'..."
-  logging.info(log_str)
-  # Parse the input file
-  doc = minidom.parse(input_fname)
+    log_str = "Converting '" + input_fname + "'" + " to " + "'" + output_fname + "'..."
+    logging.info(log_str)
+    # Parse the input file
+    doc = minidom.parse(input_fname)
 
-  # Iterate over <tile> nodes
-  num_tile_roots = len(doc.getElementsByTagName(TILE_ROOT_TAG))
-  if (num_tile_roots != 1):
-    logging.info("Found " + str(num_tile_roots) + " <" + TILE_ROOT_TAG + ">")
-    logging.error("Fail to find a require node (one and only one) <" + TILE_ROOT_TAG + "> under the root node!")
-    return error_codes["ERROR"]
-  tile_root = doc.getElementsByTagName(TILE_ROOT_TAG)[0]
-  for tile_node in tile_root.getElementsByTagName(TILE_NODE_TAG):
-    # Create a new child node <sub_tile>
-    sub_tile_node = doc.createElement(SUB_TILE_NODE_TAG)
-    # Add attributes to the new child node
-    sub_tile_node.setAttribute(NAME_TAG, tile_node.getAttribute(NAME_TAG))
-    if tile_node.hasAttribute(CAPACITY_TAG):
-      sub_tile_node.setAttribute(CAPACITY_TAG, tile_node.getAttribute(CAPACITY_TAG))
-    # Delete the out-of-date attributes
-    tile_node.removeAttribute(CAPACITY_TAG)
-    # Move other subelements to the new child node
-    for child in tile_node.childNodes:
-      # Add the node to the child node
-      child_clone = child.cloneNode(deep=True)
-      sub_tile_node.appendChild(child_clone)
-    # Remove no longer required child nodes
-    while (tile_node.hasChildNodes()):
-      tile_node.removeChild(tile_node.firstChild)
-    # Append the sub tile child to the tile node
-    tile_node.appendChild(sub_tile_node)
+    # Iterate over <tile> nodes
+    num_tile_roots = len(doc.getElementsByTagName(TILE_ROOT_TAG))
+    if num_tile_roots != 1:
+        logging.info("Found " + str(num_tile_roots) + " <" + TILE_ROOT_TAG + ">")
+        logging.error(
+            "Fail to find a require node (one and only one) <"
+            + TILE_ROOT_TAG
+            + "> under the root node!"
+        )
+        return error_codes["ERROR"]
+    tile_root = doc.getElementsByTagName(TILE_ROOT_TAG)[0]
+    for tile_node in tile_root.getElementsByTagName(TILE_NODE_TAG):
+        # Create a new child node <sub_tile>
+        sub_tile_node = doc.createElement(SUB_TILE_NODE_TAG)
+        # Add attributes to the new child node
+        sub_tile_node.setAttribute(NAME_TAG, tile_node.getAttribute(NAME_TAG))
+        if tile_node.hasAttribute(CAPACITY_TAG):
+            sub_tile_node.setAttribute(CAPACITY_TAG, tile_node.getAttribute(CAPACITY_TAG))
+        # Delete the out-of-date attributes
+        tile_node.removeAttribute(CAPACITY_TAG)
+        # Move other subelements to the new child node
+        for child in tile_node.childNodes:
+            # Add the node to the child node
+            child_clone = child.cloneNode(deep=True)
+            sub_tile_node.appendChild(child_clone)
+        # Remove no longer required child nodes
+        while tile_node.hasChildNodes():
+            tile_node.removeChild(tile_node.firstChild)
+        # Append the sub tile child to the tile node
+        tile_node.appendChild(sub_tile_node)
 
-  # Output the modified content
-  with open(output_fname, "w") as output_xml_f:
-    doc.writexml(output_xml_f, indent='', addindent=" ", newl='')
-  doc.unlink()
+    # Output the modified content
+    with open(output_fname, "w") as output_xml_f:
+        doc.writexml(output_xml_f, indent="", addindent=" ", newl="")
+    doc.unlink()
 
-  # Finish up
-  end_time = time.time()
-  time_diff = timedelta(seconds=(end_time - start_time))
-  log_end_str1 = "[Done]"
-  log_end_str2 = " took " + str(time_diff)
-  logging.info(log_end_str1 + "." * (len(log_str) - len(log_end_str1) - len(log_end_str2)) + log_end_str2)
-  return status
+    # Finish up
+    end_time = time.time()
+    time_diff = timedelta(seconds=(end_time - start_time))
+    log_end_str1 = "[Done]"
+    log_end_str2 = " took " + str(time_diff)
+    logging.info(
+        log_end_str1 + "." * (len(log_str) - len(log_end_str1) - len(log_end_str2)) + log_end_str2
+    )
+    return status
 
 #####################################################################
 # Main function
 #####################################################################
-if __name__ == '__main__':
-  # Execute when the module is not initialized from an import statement
+if __name__ == "__main__":
+    # Execute when the module is not initialized from an import statement
 
-  # Parse the options and apply sanity checks
-  parser = argparse.ArgumentParser(description='Convert an architecture file from a lower version to a higher version')
-  parser.add_argument('--input_file',
-                      required=True,
-                      help='Path to input architecture file')
-  parser.add_argument('--output_file',
-                      default="converted_arch.xml",
-                      help='Path to output converted architecture file')
-  args = parser.parse_args()
+    # Parse the options and apply sanity checks
+    parser = argparse.ArgumentParser(
+        description="Convert an architecture file from a lower version to a higher version"
+    )
+    parser.add_argument("--input_file", required=True, help="Path to input architecture file")
+    parser.add_argument(
+        "--output_file",
+        default="converted_arch.xml",
+        help="Path to output converted architecture file",
+    )
+    args = parser.parse_args()
 
-  # Run conversion: from v1.1 syntax to v1.2 syntax
-  exit(convert_arch_xml_from_v1p1_to_v1p2(args.input_file, args.output_file))
+    # Run conversion: from v1.1 syntax to v1.2 syntax
+    exit(convert_arch_xml_from_v1p1_to_v1p2(args.input_file, args.output_file))
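Aside from the reformatting, the conversion logic is unchanged: every <tile> gets a new <sub_tile> child that inherits the tile's name and capacity, and the tile's former children move under it. A self-contained sketch of that transformation on a made-up input (not repository data):

    # Minimal end-to-end sketch of the v1.1 -> v1.2 tile conversion (made-up XML).
    from xml.dom import minidom

    doc = minidom.parseString(
        '<architecture><tiles>'
        '<tile name="io" capacity="8"><equivalent_sites/></tile>'
        '</tiles></architecture>'
    )
    tile = doc.getElementsByTagName("tile")[0]
    sub_tile = doc.createElement("sub_tile")
    sub_tile.setAttribute("name", tile.getAttribute("name"))
    sub_tile.setAttribute("capacity", tile.getAttribute("capacity"))
    tile.removeAttribute("capacity")          # capacity now lives on <sub_tile>
    while tile.hasChildNodes():               # move children under <sub_tile>
        sub_tile.appendChild(tile.firstChild)
    tile.appendChild(sub_tile)
    print(doc.toxml())
    # the <tile> now reads:
    # <tile name="io"><sub_tile name="io" capacity="8"><equivalent_sites/></sub_tile></tile>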
@@ -23,23 +23,36 @@ csv_metric_tag = "metric"
 #####################################################################
 # Initialize logger
 #####################################################################
-logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
+logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG)
 
 #####################################################################
 # Parse the options
 # - [mandatory option] the file path to .csv file
 #####################################################################
-parser = argparse.ArgumentParser(
-    description='A checker for hetergeneous block mapping in OpenFPGA flow')
-parser.add_argument('--check_csv_file', required=True,
-                    help='Specify the to-be-checked csv file constaining flow-run information')
-parser.add_argument('--reference_csv_file', required=True,
-                    help='Specify the reference csv file constaining flow-run information')
-parser.add_argument('--metric_checklist_csv_file', required=True,
-                    help='Specify the csv file constaining metrics to be checked')
+parser = argparse.ArgumentParser(
+    description="A checker for hetergeneous block mapping in OpenFPGA flow"
+)
+parser.add_argument(
+    "--check_csv_file",
+    required=True,
+    help="Specify the to-be-checked csv file constaining flow-run information",
+)
+parser.add_argument(
+    "--reference_csv_file",
+    required=True,
+    help="Specify the reference csv file constaining flow-run information",
+)
+parser.add_argument(
+    "--metric_checklist_csv_file",
+    required=True,
+    help="Specify the csv file constaining metrics to be checked",
+)
 # By default, allow a 50% tolerance when checking metrics
-parser.add_argument('--check_tolerance', default="0.5,1.5",
-                    help='Specify the tolerance when checking metrics. Format <lower_bound>,<upper_bound>')
+parser.add_argument(
+    "--check_tolerance",
+    default="0.5,1.5",
+    help="Specify the tolerance when checking metrics. Format <lower_bound>,<upper_bound>",
+)
 args = parser.parse_args()
 
 #####################################################################

@@ -48,37 +61,45 @@ args = parser.parse_args()
 # Otherwise, error out
 #####################################################################
 if not isfile(args.check_csv_file):
-  logging.error("Invalid csv file to check: " + args.check_csv_file + "\nFile does not exist!\n")
-  exit(1)
+    logging.error("Invalid csv file to check: " + args.check_csv_file + "\nFile does not exist!\n")
+    exit(1)
 
 if not isfile(args.reference_csv_file):
-  logging.error("Invalid reference csv file: " + args.reference_csv_file + "\nFile does not exist!\n")
-  exit(1)
+    logging.error(
+        "Invalid reference csv file: " + args.reference_csv_file + "\nFile does not exist!\n"
+    )
+    exit(1)
 
 if not isfile(args.metric_checklist_csv_file):
-  logging.error("Invalid metric checklist csv file: " + args.metric_checklist_csv_file + "\nFile does not exist!\n")
-  exit(1)
+    logging.error(
+        "Invalid metric checklist csv file: "
+        + args.metric_checklist_csv_file
+        + "\nFile does not exist!\n"
+    )
+    exit(1)
 
 #####################################################################
 # Parse a checklist for metrics to be checked
 #####################################################################
 metric_checklist_csv_file = open(args.metric_checklist_csv_file, "r")
-metric_checklist_csv_content = csv.DictReader(filter(lambda row : row[0]!='#', metric_checklist_csv_file), delimiter=',')
+metric_checklist_csv_content = csv.DictReader(
+    filter(lambda row: row[0] != "#", metric_checklist_csv_file), delimiter=","
+)
 # Hash the reference results with the name tag
 metric_checklist = []
 for row in metric_checklist_csv_content:
-  metric_checklist.append(row[csv_metric_tag]);
+    metric_checklist.append(row[csv_metric_tag])
 
 #####################################################################
 # Parse the reference csv file
 # Skip any line start with '#' which is treated as comments
 #####################################################################
 ref_csv_file = open(args.reference_csv_file, "r")
-ref_csv_content = csv.DictReader(filter(lambda row : row[0]!='#', ref_csv_file), delimiter=',')
+ref_csv_content = csv.DictReader(filter(lambda row: row[0] != "#", ref_csv_file), delimiter=",")
 # Hash the reference results with the name tag
 ref_results = {}
 for row in ref_csv_content:
-  ref_results[row[csv_name_tag]] = row;
+    ref_results[row[csv_name_tag]] = row
 
 #####################################################################
 # Parse the tolerance to be applied when checking metrics

@@ -89,41 +110,62 @@ upper_bound_factor = float(args.check_tolerance.split(",")[1])
 #####################################################################
 # Parse the csv file to check
 #####################################################################
-with open(args.check_csv_file, newline='') as check_csv_file:
-  results_to_check = csv.DictReader(check_csv_file, delimiter=',')
-  checkpoint_count = 0
-  check_error_count = 0
-  for row in results_to_check:
-    # Start from line 1 and check information
-    for metric_to_check in metric_checklist:
-      # Check if the metric is in a range
-      if (lower_bound_factor * float(ref_results[row[csv_name_tag]][metric_to_check]) > float(row[metric_to_check])) or (upper_bound_factor * float(ref_results[row[csv_name_tag]][metric_to_check]) < float(row[metric_to_check])) :
-        # Check QoR failed, error out
-        logging.error("Benchmark " + str(row[csv_name_tag]) + " failed in checking '" + str(metric_to_check) +"'\n" + "Found: " + str(row[metric_to_check]) + " but expected: " + str(ref_results[row[csv_name_tag]][metric_to_check]) + " outside range [" + str(lower_bound_factor * 100) + "%, " + str(upper_bound_factor * 100) + "%]")
-        check_error_count += 1
-      # Pass this metric check, increase counter
-      checkpoint_count += 1
-  logging.info("Checked " + str(checkpoint_count) + " metrics")
-  logging.info("See " + str(check_error_count) + " QoR failures")
+with open(args.check_csv_file, newline="") as check_csv_file:
+    results_to_check = csv.DictReader(check_csv_file, delimiter=",")
+    checkpoint_count = 0
+    check_error_count = 0
+    for row in results_to_check:
+        # Start from line 1 and check information
+        for metric_to_check in metric_checklist:
+            # Check if the metric is in a range
+            if (
+                lower_bound_factor * float(ref_results[row[csv_name_tag]][metric_to_check])
+                > float(row[metric_to_check])
+            ) or (
+                upper_bound_factor * float(ref_results[row[csv_name_tag]][metric_to_check])
+                < float(row[metric_to_check])
+            ):
+                # Check QoR failed, error out
+                logging.error(
+                    "Benchmark "
+                    + str(row[csv_name_tag])
+                    + " failed in checking '"
+                    + str(metric_to_check)
+                    + "'\n"
+                    + "Found: "
+                    + str(row[metric_to_check])
+                    + " but expected: "
+                    + str(ref_results[row[csv_name_tag]][metric_to_check])
+                    + " outside range ["
                    + str(lower_bound_factor * 100)
+                    + "%, "
+                    + str(upper_bound_factor * 100)
+                    + "%]"
+                )
+                check_error_count += 1
+            # Pass this metric check, increase counter
+            checkpoint_count += 1
+    logging.info("Checked " + str(checkpoint_count) + " metrics")
+    logging.info("See " + str(check_error_count) + " QoR failures")
 
-if (0 < check_error_count):
-  exit(1)
+if 0 < check_error_count:
+    exit(1)
 
 #####################################################################
 # Post checked results on stdout:
 # reaching here, it means all the checks have passed
 #####################################################################
-with open(args.check_csv_file, newline='') as check_csv_file:
-  results_to_check = csv.DictReader(check_csv_file, delimiter=',')
-  # Print out keywords: name + metric checklist
-  print(str(csv_name_tag) + " ", end='')
-  for metric_to_check in metric_checklist:
-    print(str(metric_to_check) + " ", end='')
-  print("")
-
-  for row in results_to_check:
-    # Start from line 1, print checked metrics
-    print(row[csv_name_tag] + " ", end='')
-    for metric_to_check in metric_checklist:
-      print(row[metric_to_check] + " ", end='')
-    print("")
+with open(args.check_csv_file, newline="") as check_csv_file:
+    results_to_check = csv.DictReader(check_csv_file, delimiter=",")
+    # Print out keywords: name + metric checklist
+    print(str(csv_name_tag) + " ", end="")
+    for metric_to_check in metric_checklist:
+        print(str(metric_to_check) + " ", end="")
+    print("")
+
+    for row in results_to_check:
+        # Start from line 1, print checked metrics
+        print(row[csv_name_tag] + " ", end="")
+        for metric_to_check in metric_checklist:
+            print(row[metric_to_check] + " ", end="")
+        print("")
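The QoR comparison above boils down to a band check: a metric passes when lower_bound * reference <= value <= upper_bound * reference, with the default tolerance "0.5,1.5". In isolation (illustrative names, not script code):

    # Band check used by the QoR checker, stated on its own.
    def within_tolerance(value, reference, lower=0.5, upper=1.5):
        return lower * reference <= value <= upper * reference

    assert within_tolerance(90.0, 100.0)       # inside the default [50%, 150%] band
    assert not within_tolerance(160.0, 100.0)  # above the upper bound -> QoR failure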
@@ -33,16 +33,10 @@ def draw_connections(width, height, connections):
     dwg.add(dwg_main)
 
     for w in range(1, width + 2):
-        dwg_main.add(
-            dwg.line(
-                (w * SCALE, SCALE), (w * SCALE, (height + 1) * SCALE), stroke="red"
-            )
-        )
+        dwg_main.add(dwg.line((w * SCALE, SCALE), (w * SCALE, (height + 1) * SCALE), stroke="red"))
 
     for h in range(1, height + 2):
-        dwg_main.add(
-            dwg.line((SCALE, h * SCALE), ((width + 1) * SCALE, h * SCALE), stroke="red")
-        )
+        dwg_main.add(dwg.line((SCALE, h * SCALE), ((width + 1) * SCALE, h * SCALE), stroke="red"))
 
     path = "M "
     for point in connections:
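The collapsed calls above draw the grid of an SVG canvas; the loops look like this in isolation (a sketch assuming the svgwrite package, which the dwg/dwg.line usage suggests; the sizes are made up):

    # Grid-drawing sketch assuming svgwrite (as the dwg.line calls suggest).
    import svgwrite

    SCALE = 20
    width, height = 4, 3
    dwg = svgwrite.Drawing("grid.svg")
    for w in range(1, width + 2):   # vertical grid lines
        dwg.add(dwg.line((w * SCALE, SCALE), (w * SCALE, (height + 1) * SCALE), stroke="red"))
    for h in range(1, height + 2):  # horizontal grid lines
        dwg.add(dwg.line((SCALE, h * SCALE), ((width + 1) * SCALE, h * SCALE), stroke="red"))
    dwg.save()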
@@ -39,63 +39,70 @@ if sys.version_info[0] < 3:
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 LOG_FORMAT = "%(levelname)5s (%(threadName)15s) - %(message)s"
 if util.find_spec("coloredlogs"):
-    coloredlogs.install(level='INFO', stream=sys.stdout,
-                        fmt=LOG_FORMAT)
+    coloredlogs.install(level="INFO", stream=sys.stdout, fmt=LOG_FORMAT)
 else:
-    logging.basicConfig(level=logging.INFO, stream=sys.stdout,
-                        format=LOG_FORMAT)
-logger = logging.getLogger('OpenFPGA_Task_logs')
+    logging.basicConfig(level=logging.INFO, stream=sys.stdout, format=LOG_FORMAT)
+logger = logging.getLogger("OpenFPGA_Task_logs")
 
 
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Read commandline arguments
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 parser = argparse.ArgumentParser()
-parser.add_argument('tasks', nargs='+')
-parser.add_argument('--maxthreads', type=int, default=2,
-                    help="Number of fpga_flow threads to run default = 2," +
-                    "Typically <= Number of processors on the system")
-parser.add_argument('--remove_run_dir', type=str,
-                    help="Remove run dir " +
-                    "'all' to remove all." +
-                    "<int>,<int> to remove specific run dir" +
-                    "<int>-<int> To remove range of directory")
-parser.add_argument('--config', help="Override default configuration")
-parser.add_argument('--test_run', action="store_true",
-                    help="Dummy run shows final generated VPR commands")
-parser.add_argument('--debug', action="store_true",
-                    help="Run script in debug mode")
-parser.add_argument('--continue_on_fail', action="store_true",
-                    help="Exit script with return code")
-parser.add_argument('--show_thread_logs', action="store_true",
-                    help="Skips logs from running thread")
+parser.add_argument("tasks", nargs="+")
+parser.add_argument(
+    "--maxthreads",
+    type=int,
+    default=2,
+    help="Number of fpga_flow threads to run default = 2,"
+    + "Typically <= Number of processors on the system",
+)
+parser.add_argument(
+    "--remove_run_dir",
+    type=str,
+    help="Remove run dir "
+    + "'all' to remove all."
+    + "<int>,<int> to remove specific run dir"
+    + "<int>-<int> To remove range of directory",
+)
+parser.add_argument("--config", help="Override default configuration")
+parser.add_argument(
+    "--test_run", action="store_true", help="Dummy run shows final generated VPR commands"
+)
+parser.add_argument("--debug", action="store_true", help="Run script in debug mode")
+parser.add_argument("--continue_on_fail", action="store_true", help="Exit script with return code")
+parser.add_argument(
+    "--show_thread_logs", action="store_true", help="Skips logs from running thread"
+)
 args = parser.parse_args()
 
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Read script configuration file
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 task_script_dir = os.path.dirname(os.path.abspath(__file__))
-DESIGN_PATH_ENV=os.environ['design_path']
-DESIGN_TOP_ENV=os.environ['design_top']
-script_env_vars = ({"PATH": {
-    "DESIGN_PATH": DESIGN_PATH_ENV,
-    "DESIGN_TOP":DESIGN_TOP_ENV,
-    "OPENFPGA_FLOW_PATH": task_script_dir,
-    "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"),
-    "OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"),
-    "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"),
-    "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"),
-    "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
-    "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
-    "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir,
-                                                  os.pardir))}})
+DESIGN_PATH_ENV = os.environ["design_path"]
+DESIGN_TOP_ENV = os.environ["design_top"]
+script_env_vars = {
+    "PATH": {
+        "DESIGN_PATH": DESIGN_PATH_ENV,
+        "DESIGN_TOP": DESIGN_TOP_ENV,
+        "OPENFPGA_FLOW_PATH": task_script_dir,
+        "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"),
+        "OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"),
+        "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"),
+        "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"),
+        "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
+        "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
+        "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, os.pardir)),
+    }
+}
 
 config = ConfigParser(interpolation=ExtendedInterpolation())
 config.read_dict(script_env_vars)
-config.read_file(open(os.path.join(task_script_dir, 'run_fpga_task.conf')))
+config.read_file(open(os.path.join(task_script_dir, "run_fpga_task.conf")))
 gc = config["GENERAL CONFIGURATION"]
 
 
 def main():
     validate_command_line_arguments()
     for eachtask in args.tasks:

@@ -113,6 +120,7 @@ def main():
     logger.info("Task execution completed")
     exit(0)
 
+
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Subroutines starts here
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =

@@ -155,14 +163,13 @@ def remove_run_dir():
 
     try:
        for eachdir in remove_dir:
-            logger.info('Removing run_dir %s' % (eachdir))
-            if os.path.exists('latest'):
-                if eachdir == os.readlink('latest'):
+            logger.info("Removing run_dir %s" % (eachdir))
+            if os.path.exists("latest"):
+                if eachdir == os.readlink("latest"):
                     remove_dir += ["latest"]
            shutil.rmtree(eachdir, ignore_errors=True)
     except:
-        logger.exception("Failed to remove %s run directory" %
-                         (eachdir or "Unknown"))
+        logger.exception("Failed to remove %s run directory" % (eachdir or "Unknown"))
 
 
 def generate_each_task_actions(taskname):

@@ -179,46 +186,56 @@ def generate_each_task_actions(taskname):
     elif os.path.isdir(repo_tasks):
         curr_task_dir = repo_tasks
     else:
-        clean_up_and_exit("Task directory [%s] not found" % taskname + " locally at [%s]" % local_tasks + " or in OpenFPGA task directory [%s]" % repo_tasks)
+        clean_up_and_exit(
+            "Task directory [%s] not found" % taskname
+            + " locally at [%s]" % local_tasks
+            + " or in OpenFPGA task directory [%s]" % repo_tasks
+        )
 
     os.chdir(curr_task_dir)
 
     curr_task_conf_file = os.path.join(curr_task_dir, "config", "task.conf")
     if not os.path.isfile(curr_task_conf_file):
-        clean_up_and_exit(
-            "Missing configuration file for task %s" % curr_task_dir)
+        clean_up_and_exit("Missing configuration file for task %s" % curr_task_dir)
 
     # Create run directory for current task run ./runxxx
-    run_dirs = [int(os.path.basename(x)[-3:]) for x in glob.glob('run*[0-9]')]
-    curr_run_dir = "run%03d" % (max(run_dirs+[0, ])+1)
+    run_dirs = [int(os.path.basename(x)[-3:]) for x in glob.glob("run*[0-9]")]
+    curr_run_dir = "run%03d" % (
+        max(
+            run_dirs
+            + [
+                0,
+            ]
+        )
+        + 1
+    )
     if args.remove_run_dir:
         remove_run_dir()
         return
     try:
         os.mkdir(curr_run_dir)
-        if os.path.islink('latest') or os.path.exists('latest'):
+        if os.path.islink("latest") or os.path.exists("latest"):
             os.remove("latest")
         os.symlink(curr_run_dir, "latest")
-        logger.info('Created "%s" directory for current task run' %
-                    curr_run_dir)
+        logger.info('Created "%s" directory for current task run' % curr_run_dir)
     except:
         logger.exception("")
         logger.error("Failed to create new run directory in task directory")
     os.chdir(curr_run_dir)
 
     # Read task configuration file and check consistency
-    task_conf = ConfigParser(allow_no_value=True,
-                             interpolation=ExtendedInterpolation())
-    script_env_vars['PATH']["TASK_NAME"] = "/".join(taskname)
-    script_env_vars['PATH']["TASK_DIR"] = curr_task_dir
+    task_conf = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolation())
+    script_env_vars["PATH"]["TASK_NAME"] = "/".join(taskname)
+    script_env_vars["PATH"]["TASK_DIR"] = curr_task_dir
     task_conf.read_dict(script_env_vars)
     task_conf.read_file(open(curr_task_conf_file))
 
     required_sec = ["GENERAL", "BENCHMARKS", "ARCHITECTURES"]
-    missing_section = list(set(required_sec)-set(task_conf.sections()))
+    missing_section = list(set(required_sec) - set(task_conf.sections()))
     if missing_section:
-        clean_up_and_exit("Missing sections %s" % " ".join(missing_section) +
-                          " in task configuration file")
+        clean_up_and_exit(
+            "Missing sections %s" % " ".join(missing_section) + " in task configuration file"
+        )
 
     # Declare varibles to access sections
     TaskFileSections = task_conf.sections()

@@ -233,14 +250,12 @@ def generate_each_task_actions(taskname):
         if os.path.isfile(arch_full_path):
             archfile_list.append(arch_full_path)
         else:
-            clean_up_and_exit("Architecture file not found: " +
-                              "%s " % arch_file)
+            clean_up_and_exit("Architecture file not found: " + "%s " % arch_file)
     if not len(archfile_list) == len(list(set(archfile_list))):
         clean_up_and_exit("Found duplicate architectures in config file")
 
     # Get Flow information
-    logger.info('Running "%s" flow' %
-                GeneralSection.get("fpga_flow", fallback="yosys_vpr"))
+    logger.info('Running "%s" flow' % GeneralSection.get("fpga_flow", fallback="yosys_vpr"))
 
     # Check if specified benchmark files exist
     benchmark_list = []

@@ -253,8 +268,9 @@ def generate_each_task_actions(taskname):
         for eachpath in each_benchmark.split(","):
             files = glob.glob(eachpath)
             if not len(files):
-                clean_up_and_exit(("No files added benchmark %s" % bech_name) +
-                                  " with path %s " % (eachpath))
+                clean_up_and_exit(
+                    ("No files added benchmark %s" % bech_name) + " with path %s " % (eachpath)
+                )
             bench_files += files
 
         # Read provided benchmark configurations

@@ -268,28 +284,31 @@ def generate_each_task_actions(taskname):
 
         # Individual benchmark configuration
         CurrBenchPara["files"] = bench_files
-        CurrBenchPara["top_module"] = SynthSection.get(bech_name+"_top",
-                                                       fallback="top")
-        CurrBenchPara["ys_script"] = SynthSection.get(bech_name+"_yosys",
-                                                      fallback=ys_for_task_common)
-        CurrBenchPara["ys_rewrite_script"] = SynthSection.get(bech_name+"_yosys_rewrite",
-                                                              fallback=ys_rewrite_for_task_common)
-        CurrBenchPara["chan_width"] = SynthSection.get(bech_name+"_chan_width",
-                                                       fallback=chan_width_common)
+        CurrBenchPara["top_module"] = SynthSection.get(bech_name + "_top", fallback="top")
+        CurrBenchPara["ys_script"] = SynthSection.get(
+            bech_name + "_yosys", fallback=ys_for_task_common
+        )
+        CurrBenchPara["ys_rewrite_script"] = SynthSection.get(
+            bech_name + "_yosys_rewrite", fallback=ys_rewrite_for_task_common
+        )
+        CurrBenchPara["chan_width"] = SynthSection.get(
+            bech_name + "_chan_width", fallback=chan_width_common
+        )
 
         if GeneralSection.get("fpga_flow") == "vpr_blif":
             # Check if activity file exist
-            if not SynthSection.get(bech_name+"_act"):
-                clean_up_and_exit("Missing argument %s" % (bech_name+"_act") +
-                                  "for vpr_blif flow")
-            CurrBenchPara["activity_file"] = SynthSection.get(bech_name+"_act")
+            if not SynthSection.get(bech_name + "_act"):
+                clean_up_and_exit(
+                    "Missing argument %s" % (bech_name + "_act") + "for vpr_blif flow"
+                )
+            CurrBenchPara["activity_file"] = SynthSection.get(bech_name + "_act")
 
             # Check if base verilog file exists
-            if not SynthSection.get(bech_name+"_verilog"):
-                clean_up_and_exit("Missing argument %s for vpr_blif flow" %
-                                  (bech_name+"_verilog"))
-            CurrBenchPara["verilog_file"] = SynthSection.get(
-                bech_name+"_verilog")
+            if not SynthSection.get(bech_name + "_verilog"):
+                clean_up_and_exit(
+                    "Missing argument %s for vpr_blif flow" % (bech_name + "_verilog")
+                )
+            CurrBenchPara["verilog_file"] = SynthSection.get(bech_name + "_verilog")
 
         # Add script parameter list in current benchmark
         ScriptSections = [x for x in TaskFileSections if "SCRIPT_PARAM" in x]

@@ -297,7 +316,7 @@ def generate_each_task_actions(taskname):
         for eachset in ScriptSections:
             command = []
             for key, values in task_conf[eachset].items():
-                command += ["--"+key, values] if values else ["--"+key]
+                command += ["--" + key, values] if values else ["--" + key]
 
             # Set label for Sript Parameters
             set_lbl = eachset.replace("SCRIPT_PARAM", "")

@@ -312,7 +331,7 @@ def generate_each_task_actions(taskname):
     # which are uniquified
     benchmark_top_module_count = []
     for bench in benchmark_list:
-        benchmark_top_module_count.append(bench["top_module"])
+        benchmark_top_module_count.append(bench["top_module"])
 
     # Create OpenFPGA flow run commnad for each combination of
     # architecture, benchmark and parameters

@@ -321,38 +340,49 @@ def generate_each_task_actions(taskname):
     for indx, arch in enumerate(archfile_list):
         for bench in benchmark_list:
             for lbl, param in bench["script_params"].items():
-                if (benchmark_top_module_count.count(bench["top_module"]) > 1):
-                    flow_run_dir = get_flow_rundir(arch, "bench" + str(benchmark_list.index(bench)) + "_" + bench["top_module"], lbl)
+                if benchmark_top_module_count.count(bench["top_module"]) > 1:
+                    flow_run_dir = get_flow_rundir(
+                        arch,
+                        "bench" + str(benchmark_list.index(bench)) + "_" + bench["top_module"],
+                        lbl,
+                    )
                 else:
                     flow_run_dir = get_flow_rundir(arch, bench["top_module"], lbl)
 
                 command = create_run_command(
                     curr_job_dir=flow_run_dir,
                     archfile=arch,
                     benchmark_obj=bench,
                     param=param,
-                    task_conf=task_conf)
-                flow_run_cmd_list.append({
-                    "arch": arch,
-                    "bench": bench,
-                    "name": "%02d_%s_%s" % (indx, bench["top_module"], lbl),
-                    "run_dir": flow_run_dir,
-                    "commands": command,
-                    "finished": False,
-                    "status": False})
+                    task_conf=task_conf,
+                )
+                flow_run_cmd_list.append(
+                    {
+                        "arch": arch,
+                        "bench": bench,
+                        "name": "%02d_%s_%s" % (indx, bench["top_module"], lbl),
+                        "run_dir": flow_run_dir,
+                        "commands": command,
+                        "finished": False,
+                        "status": False,
+                    }
+                )
 
-    logger.info('Found %d Architectures %d Benchmarks & %d Script Parameters' %
-                (len(archfile_list), len(benchmark_list), len(ScriptSections)))
-    logger.info('Created total %d jobs' % len(flow_run_cmd_list))
+    logger.info(
+        "Found %d Architectures %d Benchmarks & %d Script Parameters"
+        % (len(archfile_list), len(benchmark_list), len(ScriptSections))
+    )
+    logger.info("Created total %d jobs" % len(flow_run_cmd_list))
     return flow_run_cmd_list
 
 
 # Make the directory name unique by including the benchmark index in the list.
 # This is because benchmarks may share the same top module names
 def get_flow_rundir(arch, top_module, flow_params=None):
     path = [
         os.path.basename(arch).replace(".xml", ""),
         top_module,
-        flow_params if flow_params else "common"
+        flow_params if flow_params else "common",
     ]
     return os.path.abspath(os.path.join(*path))

@@ -372,8 +402,8 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf):
     if os.path.isdir(curr_job_dir):
         question = "One the result directory already exist.\n"
         question += "%s\n" % curr_job_dir
-        reply = str(input(question+' (y/n): ')).lower().strip()
-        if reply[:1] in ['y', 'yes']:
+        reply = str(input(question + " (y/n): ")).lower().strip()
+        if reply[:1] in ["y", "yes"]:
             shutil.rmtree(curr_job_dir)
         else:
             logger.info("Result directory removal denied by the user")

@@ -392,8 +422,7 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf):
 
     if task_gc.get("run_engine") == "openfpga_shell":
         for eachKey in task_OFPGAc.keys():
-            command += [f"--{eachKey}",
-                        task_OFPGAc.get(f"{eachKey}")]
+            command += [f"--{eachKey}", task_OFPGAc.get(f"{eachKey}")]
 
     if benchmark_obj.get("activity_file"):
         command += ["--activity_file", benchmark_obj.get("activity_file")]

@@ -433,8 +462,7 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf):
 def strip_child_logger_info(line):
     try:
         logtype, message = line.split(" - ", 1)
-        lognumb = {"CRITICAL": 50, "ERROR": 40, "WARNING": 30,
-                   "INFO": 20, "DEBUG": 10, "NOTSET": 0}
+        lognumb = {"CRITICAL": 50, "ERROR": 40, "WARNING": 30, "INFO": 20, "DEBUG": 10, "NOTSET": 0}
         logger.log(lognumb[logtype.strip().upper()], message)
     except:
         logger.info(line)

@@ -446,18 +474,22 @@ def run_single_script(s, eachJob, job_list):
     eachJob["starttime"] = time.time()
     try:
         logfile = "%s_out.log" % thread_name
-        with open(logfile, 'w+') as output:
-            output.write("* "*20 + '\n')
+        with open(logfile, "w+") as output:
+            output.write("* " * 20 + "\n")
             output.write("RunDirectory : %s\n" % os.getcwd())
-            command = [os.getenv('PYTHON_EXEC', gc["python_path"]), gc["script_default"]] + \
-                eachJob["commands"]
-            output.write(" ".join(command) + '\n')
-            output.write("* "*20 + '\n')
+            command = [
+                os.getenv("PYTHON_EXEC", gc["python_path"]),
+                gc["script_default"],
+            ] + eachJob["commands"]
+            output.write(" ".join(command) + "\n")
+            output.write("* " * 20 + "\n")
             logger.debug("Running OpenFPGA flow with [%s]" % command)
-            process = subprocess.Popen(command,
-                                       stdout=subprocess.PIPE,
-                                       stderr=subprocess.STDOUT,
-                                       universal_newlines=True)
+            process = subprocess.Popen(
+                command,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                universal_newlines=True,
+            )
             for line in process.stdout:
                 if args.show_thread_logs:
                     strip_child_logger_info(line[:-1])

@@ -468,16 +500,16 @@ def run_single_script(s, eachJob, job_list):
             raise subprocess.CalledProcessError(0, " ".join(command))
         eachJob["status"] = True
     except:
-        logger.exception("Failed to execute openfpga flow - " +
-                         eachJob["name"])
+        logger.exception("Failed to execute openfpga flow - " + eachJob["name"])
         if not args.continue_on_fail:
             os._exit(1)
     eachJob["endtime"] = time.time()
-    timediff = timedelta(seconds=(eachJob["endtime"]-eachJob["starttime"]))
-    timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules \
-        else str(timediff)
-    logger.info("%s Finished with returncode %d, Time Taken %s " %
-                (thread_name, process.returncode, timestr))
+    timediff = timedelta(seconds=(eachJob["endtime"] - eachJob["starttime"]))
+    timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules else str(timediff)
+    logger.info(
+        "%s Finished with returncode %d, Time Taken %s "
+        % (thread_name, process.returncode, timestr)
+    )
     eachJob["finished"] = True
     no_of_finished_job = sum([not eachJ["finished"] for eachJ in job_list])
     logger.info("***** %d runs pending *****" % (no_of_finished_job))

@@ -487,8 +519,9 @@ def run_actions(job_list):
     thread_sema = threading.Semaphore(args.maxthreads)
     thread_list = []
     for _, eachjob in enumerate(job_list):
-        t = threading.Thread(target=run_single_script, name=eachjob["name"],
-                             args=(thread_sema, eachjob, job_list))
+        t = threading.Thread(
+            target=run_single_script, name=eachjob["name"], args=(thread_sema, eachjob, job_list)
+        )
         t.start()
         thread_list.append(t)
     for eachthread in thread_list:

@@ -506,22 +539,19 @@ def collect_results(job_run_list):
             logger.info("No result files found for %s" % run["name"])
 
         # Read and merge result file
-        vpr_res = ConfigParser(allow_no_value=True,
-                               interpolation=ExtendedInterpolation())
-        vpr_res.read_file(
-            open(os.path.join(run["run_dir"], "vpr_stat.result")))
+        vpr_res = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolation())
+        vpr_res.read_file(open(os.path.join(run["run_dir"], "vpr_stat.result")))
         result = OrderedDict()
         result["name"] = run["name"]
-        result["TotalRunTime"] = int(run["endtime"]-run["starttime"])
+        result["TotalRunTime"] = int(run["endtime"] - run["starttime"])
         result.update(vpr_res["RESULTS"])
         task_result.append(result)
     colnames = []
     for eachLbl in task_result:
         colnames.extend(eachLbl.keys())
     if len(task_result):
-        with open("task_result.csv", 'w', newline='') as csvfile:
-            writer = csv.DictWriter(
-                csvfile, extrasaction='ignore', fieldnames=list(set(colnames)))
+        with open("task_result.csv", "w", newline="") as csvfile:
+            writer = csv.DictWriter(csvfile, extrasaction="ignore", fieldnames=list(set(colnames)))
             writer.writeheader()
             for eachResult in task_result:
                 writer.writerow(eachResult)
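The reflowed run%03d expression keeps its original behavior: pick the highest existing runNNN suffix and add one. A standalone sketch of that bookkeeping (matching the expression above; run it in any directory):

    # Next-run-directory numbering as used by generate_each_task_actions.
    import glob
    import os

    run_dirs = [int(os.path.basename(x)[-3:]) for x in glob.glob("run*[0-9]")]
    curr_run_dir = "run%03d" % (max(run_dirs + [0]) + 1)
    print(curr_run_dir)  # "run001" in an empty directory, "run004" after run003, etc.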
@@ -11,26 +11,26 @@ from configparser import ConfigParser
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Configure logging system
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
-logging.basicConfig(level=logging.INFO, stream=sys.stdout,
-                    format='%(levelname)s (%(threadName)10s) - %(message)s')
-logger = logging.getLogger('Modelsim_run_log')
+logging.basicConfig(
+    level=logging.INFO, stream=sys.stdout, format="%(levelname)s (%(threadName)10s) - %(message)s"
+)
+logger = logging.getLogger("Modelsim_run_log")
 
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Parse commandline arguments
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 parser = argparse.ArgumentParser()
-parser.add_argument('files', nargs='+')
-parser.add_argument('--formality_template', type=str,
-                    help="Modelsim verification template file")
-parser.add_argument('--run_sim', action="store_true",
-                    help="Execute generated script in formality")
+parser.add_argument("files", nargs="+")
+parser.add_argument("--formality_template", type=str, help="Modelsim verification template file")
+parser.add_argument("--run_sim", action="store_true", help="Execute generated script in formality")
 args = parser.parse_args()
 
 # Consider default formality script template
 if not args.formality_template:
     task_script_dir = os.path.dirname(os.path.abspath(__file__))
-    args.formality_template = os.path.join(task_script_dir, os.pardir,
-                                           "misc", "formality_template.tcl")
+    args.formality_template = os.path.join(
+        task_script_dir, os.pardir, "misc", "formality_template.tcl"
+    )
 
 args.formality_template = os.path.abspath(args.formality_template)

@@ -44,56 +44,53 @@ def main():
         config = ConfigParser()
         config.read(eachFile)
 
-        port_map = ("set_user_match r:%s/%%s i:/WORK/%%s -type port -noninverted" % (
-            "/WORK/" + config["BENCHMARK_INFO"]["src_top_module"]
-        ))
-        cell_map = ("set_user_match r:%s/%%s i:/WORK/%%s -type cell -noninverted" % (
-            "/WORK/" + config["BENCHMARK_INFO"]["src_top_module"]
-        ))
+        port_map = "set_user_match r:%s/%%s i:/WORK/%%s -type port -noninverted" % (
+            "/WORK/" + config["BENCHMARK_INFO"]["src_top_module"]
+        )
+        cell_map = "set_user_match r:%s/%%s i:/WORK/%%s -type cell -noninverted" % (
+            "/WORK/" + config["BENCHMARK_INFO"]["src_top_module"]
+        )
 
         lables = {
             "SOURCE_DESIGN_FILES": config["BENCHMARK_INFO"]["benchmark_netlist"],
             "SOURCE_TOP_MODULE": "/WORK/" + config["BENCHMARK_INFO"]["src_top_module"],
-
             "IMPL_DESIGN_FILES": " ".join(
-                [val for key, val in config["FPGA_INFO"].items()
-                 if "impl_netlist_" in key]),
+                [val for key, val in config["FPGA_INFO"].items() if "impl_netlist_" in key]
+            ),
             "IMPL_TOP_DIR": "/WORK/" + config["FPGA_INFO"]["impl_top_module"],
-
-            "PORT_MAP_LIST": "\n".join([port_map %
-                                        ele for ele in
-                                        config["PORT_MATCHING"].items()]),
-            "REGISTER_MAP_LIST": "\n".join([cell_map %
-                                            ele for ele in
-                                            config["REGISTER_MATCH"].items()]),
+            "PORT_MAP_LIST": "\n".join([port_map % ele for ele in config["PORT_MATCHING"].items()]),
+            "REGISTER_MAP_LIST": "\n".join(
+                [cell_map % ele for ele in config["REGISTER_MATCH"].items()]
+            ),
         }
 
-        tmpl = Template(open(args.formality_template, encoding='utf-8').read())
-        with open(os.path.join(pDir, "Output.tcl"), 'w', encoding='utf-8') as tclout:
+        tmpl = Template(open(args.formality_template, encoding="utf-8").read())
+        with open(os.path.join(pDir, "Output.tcl"), "w", encoding="utf-8") as tclout:
             tclout.write(tmpl.substitute(lables))
     if args.run_sim:
         formality_run_string = ["formality", "-file", "Output.tcl"]
         run_command("Formality Run", "formality_run.log", formality_run_string)
     else:
-        with open("Output.tcl", 'r', encoding='utf-8') as tclout:
+        with open("Output.tcl", "r", encoding="utf-8") as tclout:
            print(tclout.read())
 
 
 def run_command(taskname, logfile, command, exit_if_fail=True):
     os.chdir(os.pardir)
     logger.info("Launching %s " % taskname)
-    with open(logfile, 'w+') as output:
+    with open(logfile, "w+") as output:
         try:
-            output.write(" ".join(command)+"\n")
-            process = subprocess.run(command,
-                                     check=True,
-                                     stdout=subprocess.PIPE,
-                                     stderr=subprocess.PIPE,
-                                     universal_newlines=True)
+            output.write(" ".join(command) + "\n")
+            process = subprocess.run(
+                command,
+                check=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                universal_newlines=True,
+            )
             output.write(process.stdout)
             if process.returncode:
-                logger.error("%s run failed with returncode %d" %
-                             (taskname, process.returncode))
+                logger.error("%s run failed with returncode %d" % (taskname, process.returncode))
         except (Exception, subprocess.CalledProcessError) as e:
             logger.exception("failed to execute %s" % taskname)
             return None
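Output.tcl generation above is plain string.Template substitution over the lables dict; a minimal sketch with a made-up template string (the real template is misc/formality_template.tcl, and the Tcl command names here are illustrative only):

    # string.Template fill as used for Output.tcl (made-up template and values).
    from string import Template

    tmpl = Template("read_files $SOURCE_DESIGN_FILES\nset_top $SOURCE_TOP_MODULE\n")
    print(tmpl.substitute({
        "SOURCE_DESIGN_FILES": "bench.v",        # placeholder value
        "SOURCE_TOP_MODULE": "/WORK/top",        # placeholder value
    }))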
(File diff suppressed because it is too large.)
|
@ -39,55 +39,62 @@ if sys.version_info[0] < 3:
|
|||
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
|
||||
LOG_FORMAT = "%(levelname)5s (%(threadName)15s) - %(message)s"
|
||||
if util.find_spec("coloredlogs"):
|
||||
coloredlogs.install(level='INFO', stream=sys.stdout,
|
||||
fmt=LOG_FORMAT)
|
||||
coloredlogs.install(level="INFO", stream=sys.stdout, fmt=LOG_FORMAT)
|
||||
else:
|
||||
logging.basicConfig(level=logging.INFO, stream=sys.stdout,
|
||||
format=LOG_FORMAT)
|
||||
logger = logging.getLogger('OpenFPGA_Task_logs')
|
||||
logging.basicConfig(level=logging.INFO, stream=sys.stdout, format=LOG_FORMAT)
|
||||
logger = logging.getLogger("OpenFPGA_Task_logs")
|
||||
|
||||
|
||||
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
|
||||
# Read commandline arguments
|
||||
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument('tasks', nargs='+')
|
||||
parser.add_argument('--maxthreads', type=int, default=2,
|
||||
help="Number of fpga_flow threads to run default = 2," +
|
||||
"Typically <= Number of processors on the system")
|
||||
parser.add_argument('--remove_run_dir', type=str,
|
||||
help="Remove run dir " +
|
||||
"'all' to remove all." +
|
||||
"<int>,<int> to remove specific run dir" +
|
||||
"<int>-<int> To remove range of directory")
|
||||
parser.add_argument('--config', help="Override default configuration")
|
||||
parser.add_argument('--test_run', action="store_true",
|
||||
help="Dummy run shows final generated VPR commands")
|
||||
parser.add_argument('--debug', action="store_true",
|
||||
help="Run script in debug mode")
|
||||
parser.add_argument('--continue_on_fail', action="store_true",
|
||||
help="Exit script with return code")
|
||||
parser.add_argument('--show_thread_logs', action="store_true",
|
||||
help="Skips logs from running thread")
|
||||
parser.add_argument("tasks", nargs="+")
|
||||
parser.add_argument(
|
||||
"--maxthreads",
|
||||
type=int,
|
||||
default=2,
|
||||
help="Number of fpga_flow threads to run default = 2,"
|
||||
+ "Typically <= Number of processors on the system",
|
||||
)
|
||||
parser.add_argument(
|
||||
"--remove_run_dir",
|
||||
type=str,
|
||||
help="Remove run dir "
|
||||
+ "'all' to remove all."
|
||||
+ "<int>,<int> to remove specific run dir"
|
||||
+ "<int>-<int> To remove range of directory",
|
||||
)
|
||||
parser.add_argument("--config", help="Override default configuration")
|
||||
parser.add_argument(
|
||||
"--test_run", action="store_true", help="Dummy run shows final generated VPR commands"
|
||||
)
|
||||
parser.add_argument("--debug", action="store_true", help="Run script in debug mode")
|
||||
parser.add_argument("--continue_on_fail", action="store_true", help="Exit script with return code")
|
||||
parser.add_argument(
|
||||
"--show_thread_logs", action="store_true", help="Skips logs from running thread"
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
|
||||
# Read script configuration file
|
||||
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
|
||||
task_script_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
script_env_vars = ({"PATH": {
|
||||
"OPENFPGA_FLOW_PATH": task_script_dir,
|
||||
"ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"),
|
||||
"OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"),
|
||||
"BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"),
|
||||
"TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"),
|
||||
"SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
|
||||
"VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
|
||||
"OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir,
|
||||
os.pardir))}})
|
||||
script_env_vars = {
|
||||
"PATH": {
|
||||
"OPENFPGA_FLOW_PATH": task_script_dir,
|
||||
"ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"),
|
||||
"OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"),
|
||||
"BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"),
|
||||
"TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"),
|
||||
"SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
|
||||
"VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
|
||||
"OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, os.pardir)),
|
||||
}
|
||||
}
|
||||
config = ConfigParser(interpolation=ExtendedInterpolation())
|
||||
config.read_dict(script_env_vars)
|
||||
config.read_file(open(os.path.join(task_script_dir, 'run_fpga_task.conf')))
|
||||
config.read_file(open(os.path.join(task_script_dir, "run_fpga_task.conf")))
|
||||
gc = config["GENERAL CONFIGURATION"]
|
||||
|
||||
|
||||
|
@ -109,6 +116,7 @@ def main():
|
|||
logger.info("Task execution completed")
|
||||
exit(0)
|
||||
|
||||
|
||||
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
|
||||
# Subroutines starts here
|
||||
# = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
|
||||
|
@ -151,14 +159,13 @@ def remove_run_dir():
|
|||
|
||||
try:
|
||||
for eachdir in remove_dir:
|
||||
logger.info('Removing run_dir %s' % (eachdir))
|
||||
if os.path.exists('latest'):
|
||||
if eachdir == os.readlink('latest'):
|
||||
logger.info("Removing run_dir %s" % (eachdir))
|
||||
if os.path.exists("latest"):
|
||||
if eachdir == os.readlink("latest"):
|
||||
remove_dir += ["latest"]
|
||||
shutil.rmtree(eachdir, ignore_errors=True)
|
||||
except:
|
||||
logger.exception("Failed to remove %s run directory" %
|
||||
(eachdir or "Unknown"))
|
||||
logger.exception("Failed to remove %s run directory" % (eachdir or "Unknown"))
|
||||
|
||||
|
||||
def generate_each_task_actions(taskname):
|
||||
|
@ -169,7 +176,7 @@ def generate_each_task_actions(taskname):
|
|||
# Check if task directory exists and consistent
|
||||
local_tasks = os.path.join(*(taskname))
|
||||
repo_tasks = os.path.join(gc["task_dir"], *(taskname))
|
||||
abs_tasks = os.path.abspath('/' + local_tasks)
|
||||
abs_tasks = os.path.abspath("/" + local_tasks)
|
||||
if os.path.isdir(local_tasks):
|
||||
os.chdir(local_tasks)
|
||||
curr_task_dir = os.path.abspath(os.getcwd())
|
||||
|
@ -178,52 +185,60 @@ def generate_each_task_actions(taskname):
     elif os.path.isdir(repo_tasks):
         curr_task_dir = repo_tasks
     else:
-        clean_up_and_exit("Task directory [%s] not found" % taskname +
-                          " locally at [%s]" % local_tasks +
-                          ", absolutely at [%s]" % abs_tasks +
-                          ", or in OpenFPGA task directory [%s]" % repo_tasks)
+        clean_up_and_exit(
+            "Task directory [%s] not found" % taskname
+            + " locally at [%s]" % local_tasks
+            + ", absolutely at [%s]" % abs_tasks
+            + ", or in OpenFPGA task directory [%s]" % repo_tasks
+        )

     os.chdir(curr_task_dir)

     curr_task_conf_file = os.path.join(curr_task_dir, "config", "task.conf")
     if not os.path.isfile(curr_task_conf_file):
-        clean_up_and_exit(
-            "Missing configuration file for task %s" % curr_task_dir)
+        clean_up_and_exit("Missing configuration file for task %s" % curr_task_dir)

     if args.remove_run_dir:
         remove_run_dir()
         flow_run_cmd_list = []
         GeneralSection = []
-        return flow_run_cmd_list,GeneralSection
+        return flow_run_cmd_list, GeneralSection

     # Create run directory for current task run ./runxxx
-    run_dirs = [int(os.path.basename(x)[-3:]) for x in glob.glob('run*[0-9]')]
-    curr_run_dir = "run%03d" % (max(run_dirs+[0, ])+1)
+    run_dirs = [int(os.path.basename(x)[-3:]) for x in glob.glob("run*[0-9]")]
+    curr_run_dir = "run%03d" % (
+        max(
+            run_dirs
+            + [
+                0,
+            ]
+        )
+        + 1
+    )
     try:
         os.mkdir(curr_run_dir)
-        if os.path.islink('latest') or os.path.exists('latest'):
+        if os.path.islink("latest") or os.path.exists("latest"):
             os.remove("latest")
         os.symlink(curr_run_dir, "latest")
-        logger.info('Created "%s" directory for current task run' %
-                    curr_run_dir)
+        logger.info('Created "%s" directory for current task run' % curr_run_dir)
     except:
         logger.exception("")
         logger.error("Failed to create new run directory in task directory")
     os.chdir(curr_run_dir)

     # Read task configuration file and check consistency
-    task_conf = ConfigParser(allow_no_value=True,
-                             interpolation=ExtendedInterpolation())
-    script_env_vars['PATH']["TASK_NAME"] = "/".join(taskname)
-    script_env_vars['PATH']["TASK_DIR"] = curr_task_dir
+    task_conf = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolation())
+    script_env_vars["PATH"]["TASK_NAME"] = "/".join(taskname)
+    script_env_vars["PATH"]["TASK_DIR"] = curr_task_dir
     task_conf.read_dict(script_env_vars)
     task_conf.read_file(open(curr_task_conf_file))

     required_sec = ["GENERAL", "BENCHMARKS", "ARCHITECTURES"]
-    missing_section = list(set(required_sec)-set(task_conf.sections()))
+    missing_section = list(set(required_sec) - set(task_conf.sections()))
     if missing_section:
-        clean_up_and_exit("Missing sections %s" % " ".join(missing_section) +
-                          " in task configuration file")
+        clean_up_and_exit(
+            "Missing sections %s" % " ".join(missing_section) + " in task configuration file"
+        )

     # Declare varibles to access sections
     TaskFileSections = task_conf.sections()
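
The run-directory scheme above scans for existing run* folders, takes the largest three-digit suffix, and adds one; seeding max() with [0] makes the first run "run001". A standalone sketch of the same logic (function name is illustrative):

import glob
import os

def next_run_dir():
    # run001, run002, ... -> pick the highest numeric suffix plus one
    run_dirs = [int(os.path.basename(x)[-3:]) for x in glob.glob("run*[0-9]")]
    return "run%03d" % (max(run_dirs + [0]) + 1)
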
@ -238,14 +253,12 @@ def generate_each_task_actions(taskname):
         if os.path.isfile(arch_full_path):
             archfile_list.append(arch_full_path)
         else:
-            clean_up_and_exit("Architecture file not found: " +
-                              "%s " % arch_file)
+            clean_up_and_exit("Architecture file not found: " + "%s " % arch_file)
     if not len(archfile_list) == len(list(set(archfile_list))):
         clean_up_and_exit("Found duplicate architectures in config file")

     # Get Flow information
-    logger.info('Running "%s" flow',
-                GeneralSection.get("fpga_flow", fallback="yosys_vpr"))
+    logger.info('Running "%s" flow', GeneralSection.get("fpga_flow", fallback="yosys_vpr"))

     # Check if specified benchmark files exist
     benchmark_list = []
@ -258,8 +271,9 @@ def generate_each_task_actions(taskname):
         for eachpath in each_benchmark.split(","):
             files = glob.glob(eachpath)
             if not len(files):
-                clean_up_and_exit(("No files added benchmark %s" % bech_name) +
-                                  " with path %s " % (eachpath))
+                clean_up_and_exit(
+                    ("No files added benchmark %s" % bech_name) + " with path %s " % (eachpath)
+                )
             bench_files += files

         # Read provided benchmark configurations
@ -289,53 +303,56 @@ def generate_each_task_actions(taskname):
             "verific_vhdl_standard",
             "verific_include_dir",
             "verific_library_dir",
-            "verific_search_lib"
+            "verific_search_lib",
         ]

         yosys_params_common = {}
         for param in yosys_params:
-            yosys_params_common[param.upper()] = SynthSection.get("bench_"+param+"_common")
+            yosys_params_common[param.upper()] = SynthSection.get("bench_" + param + "_common")

         # Individual benchmark configuration
         CurrBenchPara["files"] = bench_files
-        CurrBenchPara["top_module"] = SynthSection.get(bech_name+"_top",
-                                                       fallback="top")
-        CurrBenchPara["ys_script"] = SynthSection.get(bech_name+"_yosys",
-                                                      fallback=ys_for_task_common)
-        CurrBenchPara["ys_rewrite_script"] = SynthSection.get(bech_name+"_yosys_rewrite",
-                                                              fallback=ys_rewrite_for_task_common)
-        CurrBenchPara["chan_width"] = SynthSection.get(bech_name+"_chan_width",
-                                                       fallback=chan_width_common)
+        CurrBenchPara["top_module"] = SynthSection.get(bech_name + "_top", fallback="top")
+        CurrBenchPara["ys_script"] = SynthSection.get(
+            bech_name + "_yosys", fallback=ys_for_task_common
+        )
+        CurrBenchPara["ys_rewrite_script"] = SynthSection.get(
+            bech_name + "_yosys_rewrite", fallback=ys_rewrite_for_task_common
+        )
+        CurrBenchPara["chan_width"] = SynthSection.get(
+            bech_name + "_chan_width", fallback=chan_width_common
+        )
         CurrBenchPara["benchVariable"] = []
         for eachKey, eachValue in SynthSection.items():
             if bech_name in eachKey:
-                eachKey = eachKey.replace(bech_name+"_", "").upper()
+                eachKey = eachKey.replace(bech_name + "_", "").upper()
                 CurrBenchPara["benchVariable"] += [f"--{eachKey}", eachValue]


         for param, value in yosys_params_common.items():
             if not param in CurrBenchPara["benchVariable"] and value:
                 CurrBenchPara["benchVariable"] += [f"--{param}", value]

         if GeneralSection.get("fpga_flow") == "vpr_blif":
             # Check if activity file exist only when power analysis is required
-            if (GeneralSection.getboolean("power_analysis")):
-                if not SynthSection.get(bech_name+"_act"):
-                    clean_up_and_exit("Missing argument %s" % (bech_name+"_act") +
-                                      "for vpr_blif flow")
-                CurrBenchPara["activity_file"] = SynthSection.get(bech_name+"_act")
+            if GeneralSection.getboolean("power_analysis"):
+                if not SynthSection.get(bech_name + "_act"):
+                    clean_up_and_exit(
+                        "Missing argument %s" % (bech_name + "_act") + "for vpr_blif flow"
+                    )
+                CurrBenchPara["activity_file"] = SynthSection.get(bech_name + "_act")
             else:
                 # If users defined an activity file, we use it otherwise create a dummy act
-                if not SynthSection.get(bech_name+"_act"):
-                    CurrBenchPara["activity_file"] = bech_name+"_act"
+                if not SynthSection.get(bech_name + "_act"):
+                    CurrBenchPara["activity_file"] = bech_name + "_act"
                 else:
-                    CurrBenchPara["activity_file"] = SynthSection.get(bech_name+"_act")
+                    CurrBenchPara["activity_file"] = SynthSection.get(bech_name + "_act")

             # Check if base verilog file exists
-            if not SynthSection.get(bech_name+"_verilog"):
-                clean_up_and_exit("Missing argument %s for vpr_blif flow" %
-                                  (bech_name+"_verilog"))
-            CurrBenchPara["verilog_file"] = SynthSection.get(
-                bech_name+"_verilog")
+            if not SynthSection.get(bech_name + "_verilog"):
+                clean_up_and_exit(
+                    "Missing argument %s for vpr_blif flow" % (bech_name + "_verilog")
+                )
+            CurrBenchPara["verilog_file"] = SynthSection.get(bech_name + "_verilog")

         # Add script parameter list in current benchmark
         ScriptSections = [x for x in TaskFileSections if "SCRIPT_PARAM" in x]
@ -343,7 +360,7 @@ def generate_each_task_actions(taskname):
     for eachset in ScriptSections:
         command = []
         for key, values in task_conf[eachset].items():
-            command += ["--"+key, values] if values else ["--"+key]
+            command += ["--" + key, values] if values else ["--" + key]

         # Set label for Sript Parameters
         set_lbl = eachset.replace("SCRIPT_PARAM", "")
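
The loop above turns every key of a SCRIPT_PARAM section into a command-line flag: value-less keys become bare flags, keyed values become "--key value" pairs. A self-contained sketch with an illustrative section:

from configparser import ConfigParser

conf = ConfigParser(allow_no_value=True)
conf.read_string("[SCRIPT_PARAM_fast]\nvpr_fpga_verilog =\nk = 6\n")

command = []
for key, values in conf["SCRIPT_PARAM_fast"].items():
    command += ["--" + key, values] if values else ["--" + key]
print(command)  # ['--vpr_fpga_verilog', '--k', '6']
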
@ -358,7 +375,7 @@ def generate_each_task_actions(taskname):
     # which are uniquified
     benchmark_top_module_count = []
     for bench in benchmark_list:
-          benchmark_top_module_count.append(bench["top_module"])
+        benchmark_top_module_count.append(bench["top_module"])

     # Create OpenFPGA flow run commnad for each combination of
     # architecture, benchmark and parameters
@ -367,31 +384,42 @@ def generate_each_task_actions(taskname):
     for indx, arch in enumerate(archfile_list):
         for bench in benchmark_list:
             for lbl, param in bench["script_params"].items():
-                if (benchmark_top_module_count.count(bench["top_module"]) > 1):
-                    flow_run_dir = get_flow_rundir(arch, "bench" + str(benchmark_list.index(bench)) + "_" + bench["top_module"], lbl)
+                if benchmark_top_module_count.count(bench["top_module"]) > 1:
+                    flow_run_dir = get_flow_rundir(
+                        arch,
+                        "bench" + str(benchmark_list.index(bench)) + "_" + bench["top_module"],
+                        lbl,
+                    )
                 else:
                     flow_run_dir = get_flow_rundir(arch, bench["top_module"], lbl)

                 command = create_run_command(
                     curr_job_dir=flow_run_dir,
                     archfile=arch,
                     benchmark_obj=bench,
                     param=param,
-                    task_conf=task_conf)
-                flow_run_cmd_list.append({
-                    "arch": arch,
-                    "bench": bench,
-                    "name": "%02d_%s_%s" % (indx, bench["top_module"], lbl),
-                    "run_dir": flow_run_dir,
-                    "commands": command + bench["benchVariable"],
-                    "finished": False,
-                    "status": False})
+                    task_conf=task_conf,
+                )
+                flow_run_cmd_list.append(
+                    {
+                        "arch": arch,
+                        "bench": bench,
+                        "name": "%02d_%s_%s" % (indx, bench["top_module"], lbl),
+                        "run_dir": flow_run_dir,
+                        "commands": command + bench["benchVariable"],
+                        "finished": False,
+                        "status": False,
+                    }
+                )

-    logger.info('Found %d Architectures %d Benchmarks & %d Script Parameters' %
-                (len(archfile_list), len(benchmark_list), len(ScriptSections)))
-    logger.info('Created total %d jobs' % len(flow_run_cmd_list))
+    logger.info(
+        "Found %d Architectures %d Benchmarks & %d Script Parameters"
+        % (len(archfile_list), len(benchmark_list), len(ScriptSections))
+    )
+    logger.info("Created total %d jobs" % len(flow_run_cmd_list))

-    return flow_run_cmd_list,GeneralSection
+    return flow_run_cmd_list, GeneralSection


 # Make the directory name unique by including the benchmark index in the list.
 # This is because benchmarks may share the same top module names
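
The branch above keeps run directories unique when several benchmarks share a top module: duplicated top modules get a "bench<index>_" prefix. The same decision as a hypothetical helper:

def flow_rundir_prefix(bench, benchmark_list, top_module_counts):
    # hypothetical helper mirroring the collision handling above
    if top_module_counts.count(bench["top_module"]) > 1:
        return "bench" + str(benchmark_list.index(bench)) + "_" + bench["top_module"]
    return bench["top_module"]
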
@ -401,7 +429,7 @@ def get_flow_rundir(arch, top_module, flow_params=None):
     path = [
         os.path.basename(arch).replace(".xml", ""),
         top_module,
-        flow_params if flow_params else "common"
+        flow_params if flow_params else "common",
     ]
     return os.path.abspath(os.path.join(*path))

@ -421,8 +449,8 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf):
     if os.path.isdir(curr_job_dir):
         question = "One the result directory already exist.\n"
         question += "%s\n" % curr_job_dir
-        reply = str(input(question+' (y/n): ')).lower().strip()
-        if reply[:1] in ['y', 'yes']:
+        reply = str(input(question + " (y/n): ")).lower().strip()
+        if reply[:1] in ["y", "yes"]:
             shutil.rmtree(curr_job_dir)
         else:
             logger.info("Result directory removal denied by the user")
@ -444,8 +472,7 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf):

     if task_gc.get("run_engine") == "openfpga_shell":
         for eachKey in task_OFPGAc.keys():
-            command += [f"--{eachKey}",
-                        task_OFPGAc.get(f"{eachKey}")]
+            command += [f"--{eachKey}", task_OFPGAc.get(f"{eachKey}")]

     if benchmark_obj.get("activity_file"):
         command += ["--activity_file", benchmark_obj.get("activity_file")]
@ -485,8 +512,7 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf):
 def strip_child_logger_info(line):
     try:
         logtype, message = line.split(" - ", 1)
-        lognumb = {"CRITICAL": 50, "ERROR": 40, "WARNING": 30,
-                   "INFO": 20, "DEBUG": 10, "NOTSET": 0}
+        lognumb = {"CRITICAL": 50, "ERROR": 40, "WARNING": 30, "INFO": 20, "DEBUG": 10, "NOTSET": 0}
         logger.log(lognumb[logtype.strip().upper()], message)
     except:
         logger.info(line)
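
The lognumb table above duplicates the stdlib's own name-to-number mapping, so the parsed level of a child log line can also be resolved directly; a small check of that equivalence:

import logging

assert logging.getLevelName("ERROR") == 40
logging.getLogger(__name__).log(logging.getLevelName("WARNING"), "message text")
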
@ -498,18 +524,22 @@ def run_single_script(s, eachJob, job_list):
     eachJob["starttime"] = time.time()
     try:
         logfile = "%s_out.log" % thread_name
-        with open(logfile, 'w+') as output:
-            output.write("* "*20 + '\n')
+        with open(logfile, "w+") as output:
+            output.write("* " * 20 + "\n")
             output.write("RunDirectory : %s\n" % os.getcwd())
-            command = [os.getenv('PYTHON_EXEC', gc["python_path"]), gc["script_default"]] + \
-                eachJob["commands"]
-            output.write(" ".join(command) + '\n')
-            output.write("* "*20 + '\n')
+            command = [
+                os.getenv("PYTHON_EXEC", gc["python_path"]),
+                gc["script_default"],
+            ] + eachJob["commands"]
+            output.write(" ".join(command) + "\n")
+            output.write("* " * 20 + "\n")
             logger.debug("Running OpenFPGA flow with [%s]" % command)
-            process = subprocess.Popen(command,
-                                       stdout=subprocess.PIPE,
-                                       stderr=subprocess.STDOUT,
-                                       universal_newlines=True)
+            process = subprocess.Popen(
+                command,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                universal_newlines=True,
+            )
             for line in process.stdout:
                 if args.show_thread_logs:
                     strip_child_logger_info(line[:-1])
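
The launch pattern above merges stderr into stdout and iterates the pipe line by line, so child output streams out while the job is still running. A minimal standalone sketch (the command is a placeholder):

import subprocess

process = subprocess.Popen(
    ["python3", "--version"],  # placeholder command
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    universal_newlines=True,
)
for line in process.stdout:
    print(line.rstrip())
process.wait()
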
@ -520,27 +550,30 @@ def run_single_script(s, eachJob, job_list):
             raise subprocess.CalledProcessError(0, " ".join(command))
         eachJob["status"] = True
     except:
-        logger.exception("Failed to execute openfpga flow - %s",
-                         eachJob["name"])
+        logger.exception("Failed to execute openfpga flow - %s", eachJob["name"])
         if not args.continue_on_fail:
             os._exit(1)
     eachJob["endtime"] = time.time()
-    timediff = timedelta(seconds=(eachJob["endtime"]-eachJob["starttime"]))
-    timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules \
-        else str(timediff)
-    logger.info("%s Finished with returncode %d, Time Taken %s " ,
-                thread_name, process.returncode, timestr)
+    timediff = timedelta(seconds=(eachJob["endtime"] - eachJob["starttime"]))
+    timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules else str(timediff)
+    logger.info(
+        "%s Finished with returncode %d, Time Taken %s ",
+        thread_name,
+        process.returncode,
+        timestr,
+    )
     eachJob["finished"] = True
     no_of_finished_job = sum([not eachJ["finished"] for eachJ in job_list])
-    logger.info("***** %d runs pending *****" , no_of_finished_job)
+    logger.info("***** %d runs pending *****", no_of_finished_job)


 def run_actions(job_list):
     thread_sema = threading.Semaphore(args.maxthreads)
     thread_list = []
     for _, eachjob in enumerate(job_list):
-        t = threading.Thread(target=run_single_script, name=eachjob["name"],
-                             args=(thread_sema, eachjob, job_list))
+        t = threading.Thread(
+            target=run_single_script, name=eachjob["name"], args=(thread_sema, eachjob, job_list)
+        )
         t.start()
         thread_list.append(t)
     for eachthread in thread_list:
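
run_actions starts one thread per job but bounds concurrency with a semaphore sized by --maxthreads; each worker acquires the semaphore (the s argument) before doing real work. A minimal sketch of that throttling pattern, assuming the acquire happens inside the worker:

import threading
import time

sema = threading.Semaphore(2)  # at most two jobs run at once

def worker(n):
    with sema:
        time.sleep(0.1)  # stand-in for one flow run
        print("job %d done" % n)

threads = [threading.Thread(target=worker, args=(i,)) for i in range(5)]
for t in threads:
    t.start()
for t in threads:
    t.join()
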
@ -558,22 +591,19 @@ def collect_results(job_run_list):
             logger.info("No result files found for %s" % run["name"])

         # Read and merge result file
-        vpr_res = ConfigParser(allow_no_value=True,
-                               interpolation=ExtendedInterpolation())
-        vpr_res.read_file(
-            open(os.path.join(run["run_dir"], "vpr_stat.result")))
+        vpr_res = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolation())
+        vpr_res.read_file(open(os.path.join(run["run_dir"], "vpr_stat.result")))
         result = OrderedDict()
         result["name"] = run["name"]
-        result["TotalRunTime"] = int(run["endtime"]-run["starttime"])
+        result["TotalRunTime"] = int(run["endtime"] - run["starttime"])
         result.update(vpr_res["RESULTS"])
         task_result.append(result)
     colnames = []
     for eachLbl in task_result:
         colnames.extend(eachLbl.keys())
     if len(task_result):
-        with open("task_result.csv", 'w', newline='') as csvfile:
-            writer = csv.DictWriter(
-                csvfile, extrasaction='ignore', fieldnames=list(colnames))
+        with open("task_result.csv", "w", newline="") as csvfile:
+            writer = csv.DictWriter(csvfile, extrasaction="ignore", fieldnames=list(colnames))
             writer.writeheader()
             for eachResult in task_result:
                 writer.writerow(eachResult)
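
Because each merged result dict may carry a different set of VPR metrics, writing the CSV with extrasaction="ignore" lets rows omit or carry extra keys without raising. A self-contained sketch with illustrative rows:

import csv

rows = [
    {"name": "job_a", "TotalRunTime": 12},
    {"name": "job_b", "TotalRunTime": 15, "extra_metric": 3},
]
colnames = ["name", "TotalRunTime"]
with open("task_result.csv", "w", newline="") as csvfile:
    # keys missing from fieldnames are silently dropped
    writer = csv.DictWriter(csvfile, extrasaction="ignore", fieldnames=colnames)
    writer.writeheader()
    for row in rows:
        writer.writerow(row)
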
@ -15,57 +15,67 @@ from configparser import ConfigParser, ExtendedInterpolation
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Configure logging system
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
-FILE_LOG_FORMAT = '%(levelname)s (%(threadName)10s) - %(message)s'
-logging.basicConfig(level=logging.INFO, stream=sys.stdout,
-                    format='%(levelname)s (%(threadName)10s) - %(message)s')
-logger = logging.getLogger('Modelsim_run_log')
+FILE_LOG_FORMAT = "%(levelname)s (%(threadName)10s) - %(message)s"
+logging.basicConfig(
+    level=logging.INFO, stream=sys.stdout, format="%(levelname)s (%(threadName)10s) - %(message)s"
+)
+logger = logging.getLogger("Modelsim_run_log")

 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Parse commandline arguments
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 parser = argparse.ArgumentParser()
-parser.add_argument('files', nargs='+',
-                    help="Pass SimulationDeckInfo generated by OpenFPGA flow" +
-                    " or pass taskname <taskname> <run_number[optional]>")
-parser.add_argument('--maxthreads', type=int, default=2,
-                    help="Number of fpga_flow threads to run default = 2," +
-                    "Typically <= Number of processors on the system")
-parser.add_argument('--debug', action="store_true",
-                    help="Run script in debug mode")
-parser.add_argument('--modelsim_proc_tmpl', type=str,
-                    help="Modelsim proc template file")
-parser.add_argument('--modelsim_runsim_tmpl', type=str,
-                    help="Modelsim runsim template file")
-parser.add_argument('--run_sim', action="store_true",
-                    help="Execute generated script in formality")
-parser.add_argument('--modelsim_proj_name',
-                    help="Provide modelsim project name")
-parser.add_argument('--modelsim_ini', type=str,
-                    default="/uusoc/facility/cad_tools/Mentor/modelsim10.7b/modeltech/modelsim.ini",
-                    help="Skip any confirmation")
-parser.add_argument('--skip_prompt', action='store_true',
-                    help='Skip any confirmation')
-parser.add_argument('--ini_filename', type=str,
-                    default="simulation_deck_info.ini",
-                    help='default INI filename in in fun dir')
+parser.add_argument(
+    "files",
+    nargs="+",
+    help="Pass SimulationDeckInfo generated by OpenFPGA flow"
+    + " or pass taskname <taskname> <run_number[optional]>",
+)
+parser.add_argument(
+    "--maxthreads",
+    type=int,
+    default=2,
+    help="Number of fpga_flow threads to run default = 2,"
+    + "Typically <= Number of processors on the system",
+)
+parser.add_argument("--debug", action="store_true", help="Run script in debug mode")
+parser.add_argument("--modelsim_proc_tmpl", type=str, help="Modelsim proc template file")
+parser.add_argument("--modelsim_runsim_tmpl", type=str, help="Modelsim runsim template file")
+parser.add_argument("--run_sim", action="store_true", help="Execute generated script in formality")
+parser.add_argument("--modelsim_proj_name", help="Provide modelsim project name")
+parser.add_argument(
+    "--modelsim_ini",
+    type=str,
+    default="/uusoc/facility/cad_tools/Mentor/modelsim10.7b/modeltech/modelsim.ini",
+    help="Skip any confirmation",
+)
+parser.add_argument("--skip_prompt", action="store_true", help="Skip any confirmation")
+parser.add_argument(
+    "--ini_filename",
+    type=str,
+    default="simulation_deck_info.ini",
+    help="default INI filename in in fun dir",
+)
 args = parser.parse_args()

 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Read script configuration file
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 task_script_dir = os.path.dirname(os.path.abspath(__file__))
-script_env_vars = ({"PATH": {
-    "OPENFPGA_FLOW_PATH": task_script_dir,
-    "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"),
-    "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"),
-    "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"),
-    "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
-    "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
-    "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir,
-                                                  os.pardir))}})
+script_env_vars = {
+    "PATH": {
+        "OPENFPGA_FLOW_PATH": task_script_dir,
+        "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"),
+        "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"),
+        "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"),
+        "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
+        "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
+        "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, os.pardir)),
+    }
+}
 config = ConfigParser(interpolation=ExtendedInterpolation())
 config.read_dict(script_env_vars)
-config.read_file(open(os.path.join(task_script_dir, 'run_fpga_task.conf')))
+config.read_file(open(os.path.join(task_script_dir, "run_fpga_task.conf")))
 gc = config["GENERAL CONFIGURATION"]

 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
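
The ${PATH:OPENFPGA_PATH} placeholders above rely on ConfigParser's ExtendedInterpolation, which resolves ${section:key} references across sections; read_dict seeds those values before the .conf file is parsed. A minimal sketch with an illustrative path:

from configparser import ConfigParser, ExtendedInterpolation

config = ConfigParser(interpolation=ExtendedInterpolation())
config.read_dict({"PATH": {"OPENFPGA_PATH": "/opt/openfpga"}})
config.read_string("[GENERAL]\narch_dir = ${PATH:OPENFPGA_PATH}/arch\n")
print(config["GENERAL"]["arch_dir"])  # -> /opt/openfpga/arch
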
@ -73,11 +83,11 @@ gc = config["GENERAL CONFIGURATION"]
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 task_script_dir = os.path.dirname(os.path.abspath(__file__))
 if not args.modelsim_proc_tmpl:
-    args.modelsim_proc_tmpl = os.path.join(task_script_dir, os.pardir,
-                                           "misc", "modelsim_proc.tcl")
+    args.modelsim_proc_tmpl = os.path.join(task_script_dir, os.pardir, "misc", "modelsim_proc.tcl")
 if not args.modelsim_runsim_tmpl:
-    args.modelsim_runsim_tmpl = os.path.join(task_script_dir, os.pardir,
-                                             "misc", "modelsim_runsim.tcl")
+    args.modelsim_runsim_tmpl = os.path.join(
+        task_script_dir, os.pardir, "misc", "modelsim_runsim.tcl"
+    )

 args.modelsim_proc_tmpl = os.path.abspath(args.modelsim_proc_tmpl)
 args.modelsim_runsim_tmpl = os.path.abspath(args.modelsim_runsim_tmpl)
@ -101,10 +111,8 @@ def main():
         clean_up_and_exit("Task run directory [%s] not found" % temp_dir)

     # = = = = = = = Create a current script log file handler = = = =
-    logfile_path = os.path.join(gc["task_dir"],
-                                taskname, task_run, "modelsim_run.log")
-    resultfile_path = os.path.join(gc["task_dir"],
-                                   taskname, task_run, "modelsim_result.csv")
+    logfile_path = os.path.join(gc["task_dir"], taskname, task_run, "modelsim_run.log")
+    resultfile_path = os.path.join(gc["task_dir"], taskname, task_run, "modelsim_result.csv")
     logfilefh = logging.FileHandler(logfile_path, "w")
     logfilefh.setFormatter(logging.Formatter(FILE_LOG_FORMAT))
     logger.addHandler(logfilefh)
@ -120,8 +128,9 @@ def main():
     task_ini_files = []
     for eachfile in logfiles:
         with open(eachfile) as fp:
-            run_dir = [re.findall(r'^INFO.*Run directory : (.*)$', line)
-                       for line in open(eachfile)]
+            run_dir = [
+                re.findall(r"^INFO.*Run directory : (.*)$", line) for line in open(eachfile)
+            ]
             run_dir = filter(bool, run_dir)
             for each_run in run_dir:
                 INIfile = os.path.join(each_run[0], args.ini_filename)
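
The comprehension above greps each task log for its "Run directory : <path>" lines and keeps the non-empty matches. The same scan over an illustrative log snippet:

import re

sample = "INFO (MainThread) - Run directory : /tmp/run001\nINFO - other line\n"
run_dirs = [
    m for line in sample.splitlines() for m in re.findall(r"^INFO.*Run directory : (.*)$", line)
]
print(run_dirs)  # ['/tmp/run001']
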
@ -152,10 +161,10 @@ def create_tcl_script(files):

         # Resolve project Modelsim project path
         args.modelsim_run_dir = os.path.dirname(os.path.abspath(eachFile))
-        modelsim_proj_dir = os.path.join(
-            args.modelsim_run_dir, "MMSIM2")
-        logger.info(f"Modelsim project dir not provide " +
-                    f"using default {modelsim_proj_dir} directory")
+        modelsim_proj_dir = os.path.join(args.modelsim_run_dir, "MMSIM2")
+        logger.info(
+            f"Modelsim project dir not provide " + f"using default {modelsim_proj_dir} directory"
+        )

         modelsim_proj_dir = os.path.abspath(modelsim_proj_dir)
         config["MODELSIM_PROJ_DIR"] = modelsim_proj_dir
@ -164,67 +173,68 @@ def create_tcl_script(files):

         # Resolve Modelsim Project name
         args.modelsim_proj_name = config["BENCHMARK"] + "_MMSIM"
-        logger.info(f"Modelsim project name not provide " +
-                    f"using default {args.modelsim_proj_name} directory")
+        logger.info(
+            f"Modelsim project name not provide "
+            + f"using default {args.modelsim_proj_name} directory"
+        )

         config["MODELSIM_PROJ_NAME"] = args.modelsim_proj_name
         config["MODELSIM_INI"] = args.modelsim_ini
-        config["VERILOG_PATH"] = os.path.join(
-            os.getcwd(), config["VERILOG_PATH"])
-        IncludeFile = os.path.join(
-            os.getcwd(),
-            config["VERILOG_PATH"],
-            config["VERILOG_FILE2"])
+        config["VERILOG_PATH"] = os.path.join(os.getcwd(), config["VERILOG_PATH"])
+        IncludeFile = os.path.join(os.getcwd(), config["VERILOG_PATH"], config["VERILOG_FILE2"])
         IncludeFileResolved = os.path.join(
             os.getcwd(),
             config["VERILOG_PATH"],
-            config["VERILOG_FILE2"].replace(".v", "_resolved.v"))
+            config["VERILOG_FILE2"].replace(".v", "_resolved.v"),
+        )
         with open(IncludeFileResolved, "w") as fpw:
             with open(IncludeFile, "r") as fp:
                 for eachline in fp.readlines():
-                    eachline = eachline.replace("\"./", "\"../../../")
+                    eachline = eachline.replace('"./', '"../../../')
                     fpw.write(eachline)
         # Modify the variables in config file here
         config["TOP_TB"] = os.path.splitext(config["TOP_TB"])[0]

         # Write final template file
         # Write runsim file
-        tmpl = Template(open(args.modelsim_runsim_tmpl,
-                             encoding='utf-8').read())
-        runsim_filename = os.path.join(modelsim_proj_dir,
-                                       "%s_runsim.tcl" % config['BENCHMARK'])
+        tmpl = Template(open(args.modelsim_runsim_tmpl, encoding="utf-8").read())
+        runsim_filename = os.path.join(modelsim_proj_dir, "%s_runsim.tcl" % config["BENCHMARK"])
         logger.info(f"Creating tcl script at : {runsim_filename}")
-        with open(runsim_filename, 'w', encoding='utf-8') as tclout:
+        with open(runsim_filename, "w", encoding="utf-8") as tclout:
             tclout.write(tmpl.substitute(config))

         # Write proc file
-        proc_filename = os.path.join(modelsim_proj_dir,
-                                     "%s_autocheck_proc.tcl" % config['BENCHMARK'])
+        proc_filename = os.path.join(
+            modelsim_proj_dir, "%s_autocheck_proc.tcl" % config["BENCHMARK"]
+        )
         logger.info(f"Creating tcl script at : {proc_filename}")
-        with open(proc_filename, 'w', encoding='utf-8') as tclout:
-            tclout.write(open(args.modelsim_proc_tmpl,
-                              encoding='utf-8').read())
-        runsim_files.append({
-            "ini_file": eachFile,
-            "modelsim_run_dir": args.modelsim_run_dir,
-            "runsim_filename": runsim_filename,
-            "run_complete": False,
-            "status": False,
-            "finished": True,
-            "starttime": 0,
-            "endtime": 0,
-            "Errors": 0,
-            "Warnings": 0
-        })
+        with open(proc_filename, "w", encoding="utf-8") as tclout:
+            tclout.write(open(args.modelsim_proc_tmpl, encoding="utf-8").read())
+        runsim_files.append(
+            {
+                "ini_file": eachFile,
+                "modelsim_run_dir": args.modelsim_run_dir,
+                "runsim_filename": runsim_filename,
+                "run_complete": False,
+                "status": False,
+                "finished": True,
+                "starttime": 0,
+                "endtime": 0,
+                "Errors": 0,
+                "Warnings": 0,
+            }
+        )
     # Execute modelsim
     if args.run_sim:
         thread_sema = threading.Semaphore(args.maxthreads)
         logger.info("Launching %d parallel threads" % args.maxthreads)
         thread_list = []
         for thread_no, eachjob in enumerate(runsim_files):
-            t = threading.Thread(target=run_modelsim_thread,
-                                 name=f"Thread_{thread_no:d}",
-                                 args=(thread_sema, eachjob, runsim_files))
+            t = threading.Thread(
+                target=run_modelsim_thread,
+                name=f"Thread_{thread_no:d}",
+                args=(thread_sema, eachjob, runsim_files),
+            )
             t.start()
             thread_list.append(t)
         for eachthread in thread_list:
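
The runsim .tcl files are produced with string.Template: the template text carries $-placeholders that substitute() fills from the config mapping. A minimal sketch with illustrative keys:

from string import Template

tmpl = Template("vsim -do $MODELSIM_PROJ_DIR/${BENCHMARK}_runsim.tcl")
print(tmpl.substitute({"MODELSIM_PROJ_DIR": "/tmp/MMSIM2", "BENCHMARK": "counter"}))
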
@ -235,6 +245,7 @@ def create_tcl_script(files):
         logger.info(f"runsim_filename {runsim_filename}")
         logger.info(f"proc_filename {proc_filename}")
         from pprint import pprint
+
         pprint(runsim_files)

@ -247,24 +258,24 @@ def run_modelsim_thread(s, eachJob, job_list):
     eachJob["Warnings"] = 0
     try:
         logfile = "%s_modelsim.log" % thread_name
-        eachJob["logfile"] = "<task_dir>" + \
-            os.path.relpath(logfile, gc["task_dir"])
-        with open(logfile, 'w+') as output:
-            output.write("* "*20 + '\n')
+        eachJob["logfile"] = "<task_dir>" + os.path.relpath(logfile, gc["task_dir"])
+        with open(logfile, "w+") as output:
+            output.write("* " * 20 + "\n")
             output.write("RunDirectory : %s\n" % os.getcwd())
             command = ["vsim", "-c", "-do", eachJob["runsim_filename"]]
-            output.write(" ".join(command) + '\n')
-            output.write("* "*20 + '\n')
+            output.write(" ".join(command) + "\n")
+            output.write("* " * 20 + "\n")
             logger.info("Running modelsim with [%s]" % " ".join(command))
-            process = subprocess.Popen(command,
-                                       stdout=subprocess.PIPE,
-                                       stderr=subprocess.STDOUT,
-                                       universal_newlines=True)
+            process = subprocess.Popen(
+                command,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.STDOUT,
+                universal_newlines=True,
+            )
             for line in process.stdout:
                 if "Errors" in line:
                     logger.info(line.strip())
-                    e, w = re.match(
-                        "# .*: ([0-9].*), .*: ([0-9].*)", line).groups()
+                    e, w = re.match("# .*: ([0-9].*), .*: ([0-9].*)", line).groups()
                     eachJob["Errors"] += int(e)
                     eachJob["Warnings"] += int(w)
             sys.stdout.buffer.flush()
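
The regex above picks the error and warning counts out of vsim's summary lines. A check against an imitation of that "# Errors: N, Warnings: M" format (the sample line is illustrative, not captured vsim output):

import re

line = "# Errors: 0, Warnings: 2"
e, w = re.match("# .*: ([0-9].*), .*: ([0-9].*)", line).groups()
print(int(e), int(w))  # 0 2
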
@ -276,37 +287,35 @@ def run_modelsim_thread(s, eachJob, job_list):
         if not eachJob["Errors"]:
             eachJob["status"] = True
     except:
-        logger.exception("Failed to execute openfpga flow - " +
-                         eachJob["name"])
+        logger.exception("Failed to execute openfpga flow - " + eachJob["name"])
         if not args.continue_on_fail:
             os._exit(1)
     eachJob["endtime"] = time.time()
-    timediff = timedelta(seconds=(eachJob["endtime"]-eachJob["starttime"]))
-    timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules \
-        else str(timediff)
+    timediff = timedelta(seconds=(eachJob["endtime"] - eachJob["starttime"]))
+    timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules else str(timediff)
     eachJob["exectime"] = timestr
-    logger.info("%s Finished with returncode %d, Time Taken %s " %
-                (thread_name, process.returncode, timestr))
+    logger.info(
+        "%s Finished with returncode %d, Time Taken %s "
+        % (thread_name, process.returncode, timestr)
+    )
     eachJob["finished"] = True
     no_of_finished_job = sum([not eachJ["finished"] for eachJ in job_list])
     logger.info("***** %d runs pending *****" % (no_of_finished_job))


 def collect_result(result_file, result_obj):
-    colnames = ["status", "Errors", "Warnings",
-                "run_complete", "exectime", "finished", "logfile"]
+    colnames = ["status", "Errors", "Warnings", "run_complete", "exectime", "finished", "logfile"]
     if len(result_obj):
-        with open(result_file, 'w', newline='') as csvfile:
-            writer = csv.DictWriter(
-                csvfile, extrasaction='ignore', fieldnames=colnames)
+        with open(result_file, "w", newline="") as csvfile:
+            writer = csv.DictWriter(csvfile, extrasaction="ignore", fieldnames=colnames)
             writer.writeheader()
             for eachResult in result_obj:
                 writer.writerow(eachResult)
-    logger.info("= = = ="*10)
+    logger.info("= = = =" * 10)
     passed_jobs = [each["status"] for each in result_obj]
     logger.info(f"Passed Jobs %d/%d", len(passed_jobs), len(result_obj))
    logger.info(f"Result file stored at {result_file}")
-    logger.info("= = = ="*10)
+    logger.info("= = = =" * 10)


 if __name__ == "__main__":
@ -1,3 +1,8 @@
 envyaml==1.0.201125
 humanize==3.1.0
-coloredlogs==9.1
+coloredlogs==9.1
+
+# Python linter and formatter
+click==8.0.2 # Our version of black needs an older version of click (https://stackoverflow.com/questions/71673404/importerror-cannot-import-name-unicodefun-from-click)
+black==20.8b1
+pylint==2.7.4