diff --git a/openfpga_flow/scripts/arch_file_updater.py b/openfpga_flow/scripts/arch_file_updater.py
index d301ce913..3538ec7f3 100644
--- a/openfpga_flow/scripts/arch_file_updater.py
+++ b/openfpga_flow/scripts/arch_file_updater.py
@@ -1,6 +1,6 @@
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Script Name : arch_file_updater.py
-# Description : This script designed to update architecture files
+# Description : This script is designed to update architecture files
 #               from a lower version to higher version
 # Author      : Xifan Tang
 # Email       : xifan@osfpga.org
@@ -19,17 +19,12 @@ import re
 #####################################################################
 # Error codes
 #####################################################################
-error_codes = {
-    "SUCCESS": 0,
-    "ERROR": 1,
-    "OPTION_ERROR": 2,
-    "FILE_ERROR": 3
-}
+error_codes = {"SUCCESS": 0, "ERROR": 1, "OPTION_ERROR": 2, "FILE_ERROR": 3}
 
 #####################################################################
-# Initialize logger 
+# Initialize logger
 #####################################################################
-logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO);
+logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
 
 #####################################################################
 # Upgrade an architecture XML file from version 1.1 syntax to version 1.2
@@ -41,77 +36,86 @@ logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO);
 # - The attribute 'capacity' of parent is removed
 #####################################################################
 def convert_arch_xml_from_v1p1_to_v1p2(input_fname, output_fname):
-  # Constants
-  TILE_ROOT_TAG = "tiles"
-  TILE_NODE_TAG = "tile"
-  SUB_TILE_NODE_TAG = "sub_tile"
-  NAME_TAG = "name"
-  CAPACITY_TAG = "capacity"
+    # Constants
+    TILE_ROOT_TAG = "tiles"
+    TILE_NODE_TAG = "tile"
+    SUB_TILE_NODE_TAG = "sub_tile"
+    NAME_TAG = "name"
+    CAPACITY_TAG = "capacity"
 
-  # Log runtime and status
-  status = error_codes["SUCCESS"]
-  start_time = time.time()
+    # Log runtime and status
+    status = error_codes["SUCCESS"]
+    start_time = time.time()
 
-  log_str = "Converting \'" + input_fname + "\'" + " to " + "\'" + output_fname + "\'..."
-  logging.info(log_str)
-  # Parse the input file
-  doc = minidom.parse(input_fname)
+    log_str = "Converting '" + input_fname + "'" + " to " + "'" + output_fname + "'..."
+    logging.info(log_str)
+    # Parse the input file
+    doc = minidom.parse(input_fname)
 
-  # Iterate over nodes
-  num_tile_roots = len(doc.getElementsByTagName(TILE_ROOT_TAG))
-  if (num_tile_roots != 1):
-    logging.info("Found " + str(num_tile_roots) + " <" + TILE_ROOT_TAG + ">")
-    logging.error("Fail to find a require node (one and only one) <" + TILE_ROOT_TAG + "> under the root node!")
-    return error_codes["ERROR"]
-  tile_root = doc.getElementsByTagName(TILE_ROOT_TAG)[0]
-  for tile_node in tile_root.getElementsByTagName(TILE_NODE_TAG):
-    # Create a new child node
-    sub_tile_node = doc.createElement(SUB_TILE_NODE_TAG)
-    # Add attributes to the new child node
-    sub_tile_node.setAttribute(NAME_TAG, tile_node.getAttribute(NAME_TAG))
-    if tile_node.hasAttribute(CAPACITY_TAG):
-      sub_tile_node.setAttribute(CAPACITY_TAG, tile_node.getAttribute(CAPACITY_TAG))
-      # Delete the out-of-date attributes
-      tile_node.removeAttribute(CAPACITY_TAG)
-    # Move other subelements to the new child node
-    for child in tile_node.childNodes:
-      # Add the node to the child node
-      child_clone = child.cloneNode(deep=True)
-      sub_tile_node.appendChild(child_clone)
-    # Remove no longer required child nodes
-    while (tile_node.hasChildNodes()):
-      tile_node.removeChild(tile_node.firstChild)
-    # Append the sub tile child to the tile node
-    tile_node.appendChild(sub_tile_node)
+    # Iterate over nodes
+    num_tile_roots = len(doc.getElementsByTagName(TILE_ROOT_TAG))
+    if num_tile_roots != 1:
+        logging.info("Found " + str(num_tile_roots) + " <" + TILE_ROOT_TAG + ">")
+        logging.error(
+            "Failed to find a required node (one and only one) <"
+            + TILE_ROOT_TAG
+            + "> under the root node!"
+        )
+        return error_codes["ERROR"]
+    tile_root = doc.getElementsByTagName(TILE_ROOT_TAG)[0]
+    for tile_node in tile_root.getElementsByTagName(TILE_NODE_TAG):
+        # Create a new child node
+        sub_tile_node = doc.createElement(SUB_TILE_NODE_TAG)
+        # Add attributes to the new child node
+        sub_tile_node.setAttribute(NAME_TAG, tile_node.getAttribute(NAME_TAG))
+        if tile_node.hasAttribute(CAPACITY_TAG):
+            sub_tile_node.setAttribute(CAPACITY_TAG, tile_node.getAttribute(CAPACITY_TAG))
+            # Delete the out-of-date attributes
+            tile_node.removeAttribute(CAPACITY_TAG)
+        # Move other subelements to the new child node
+        for child in tile_node.childNodes:
+            # Add the node to the child node
+            child_clone = child.cloneNode(deep=True)
+            sub_tile_node.appendChild(child_clone)
+        # Remove no longer required child nodes
+        while tile_node.hasChildNodes():
+            tile_node.removeChild(tile_node.firstChild)
+        # Append the sub tile child to the tile node
+        tile_node.appendChild(sub_tile_node)
 
-  # Output the modified content
-  with open(output_fname, "w") as output_xml_f:
-    doc.writexml(output_xml_f, indent='', addindent="  ", newl='')
-    doc.unlink()
+    # Output the modified content
+    with open(output_fname, "w") as output_xml_f:
+        doc.writexml(output_xml_f, indent="", addindent="  ", newl="")
+        doc.unlink()
+
+    # Finish up
+    end_time = time.time()
+    time_diff = timedelta(seconds=(end_time - start_time))
+    log_end_str1 = "[Done]"
+    log_end_str2 = " took " + str(time_diff)
+    logging.info(
+        log_end_str1 + "." * (len(log_str) - len(log_end_str1) - len(log_end_str2)) + log_end_str2
+    )
+    return status
 
-  # Finish up
-  end_time = time.time()
-  time_diff = timedelta(seconds=(end_time - start_time))
-  log_end_str1 = "[Done]"
-  log_end_str2 = " took " + str(time_diff)
-  logging.info(log_end_str1 + "." * (len(log_str) - len(log_end_str1) - len(log_end_str2)) + log_end_str2)
-  return status
 
 #####################################################################
 # Main function
 #####################################################################
-if __name__ == '__main__':
-  # Execute when the module is not initialized from an import statement
+if __name__ == "__main__":
+    # Execute when the module is not initialized from an import statement
 
-  # Parse the options and apply sanity checks
-  parser = argparse.ArgumentParser(description='Convert an architecture file from a lower version to a higher version')
-  parser.add_argument('--input_file',
-                      required=True,
-                      help='Path to input architecture file')
-  parser.add_argument('--output_file',
-                      default="converted_arch.xml",
-                      help='Path to output converted architecture file')
-  args = parser.parse_args()
+    # Parse the options and apply sanity checks
+    parser = argparse.ArgumentParser(
+        description="Convert an architecture file from a lower version to a higher version"
+    )
+    parser.add_argument("--input_file", required=True, help="Path to input architecture file")
+    parser.add_argument(
+        "--output_file",
+        default="converted_arch.xml",
+        help="Path to output converted architecture file",
+    )
+    args = parser.parse_args()
 
-  # Run conversion: from v1.1 syntax to v1.2 syntax
-  exit(convert_arch_xml_from_v1p1_to_v1p2(args.input_file, args.output_file))
+    # Run conversion: from v1.1 syntax to v1.2 syntax
+    exit(convert_arch_xml_from_v1p1_to_v1p2(args.input_file, args.output_file))
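For context, the converter reformatted above rewrites each v1.1 <tile> node into a v1.2 <tile> wrapping a new <sub_tile>, moving the capacity attribute down. A minimal usage sketch; the file names here are hypothetical, and the XML fragments only illustrate the transformation the function performs:

# Hedged usage sketch for the converter above; "my_arch_v1p1.xml" and
# "my_arch_v1p2.xml" are hypothetical file names, not files shipped with OpenFPGA.
#
# v1.1 input : <tiles><tile name="clb" capacity="2">...</tile></tiles>
# v1.2 output: <tiles><tile name="clb"><sub_tile name="clb" capacity="2">...</sub_tile></tile></tiles>
from arch_file_updater import convert_arch_xml_from_v1p1_to_v1p2

status = convert_arch_xml_from_v1p1_to_v1p2("my_arch_v1p1.xml", "my_arch_v1p2.xml")
assert status == 0  # error_codes["SUCCESS"] on a clean conversion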
diff --git a/openfpga_flow/scripts/check_qor.py b/openfpga_flow/scripts/check_qor.py
index 9b55e64c1..5913daeca 100644
--- a/openfpga_flow/scripts/check_qor.py
+++ b/openfpga_flow/scripts/check_qor.py
@@ -23,23 +23,36 @@ csv_metric_tag = "metric"
 #####################################################################
 # Initialize logger
 #####################################################################
-logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
+logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.DEBUG)
 
 #####################################################################
 # Parse the options
 # - [mandatory option] the file path to .csv file
 #####################################################################
 parser = argparse.ArgumentParser(
-    description='A checker for hetergeneous block mapping in OpenFPGA flow')
-parser.add_argument('--check_csv_file', required=True,
-                    help='Specify the to-be-checked csv file constaining flow-run information')
-parser.add_argument('--reference_csv_file', required=True,
-                    help='Specify the reference csv file constaining flow-run information')
-parser.add_argument('--metric_checklist_csv_file', required=True,
-                    help='Specify the csv file constaining metrics to be checked')
+    description="A checker for heterogeneous block mapping in OpenFPGA flow"
+)
+parser.add_argument(
+    "--check_csv_file",
+    required=True,
+    help="Specify the to-be-checked csv file containing flow-run information",
+)
+parser.add_argument(
+    "--reference_csv_file",
+    required=True,
+    help="Specify the reference csv file containing flow-run information",
+)
+parser.add_argument(
+    "--metric_checklist_csv_file",
+    required=True,
+    help="Specify the csv file containing metrics to be checked",
+)
 # By default, allow a 50% tolerance when checking metrics
-parser.add_argument('--check_tolerance', default="0.5,1.5",
-                    help='Specify the tolerance when checking metrics. Format <lower_bound>,<upper_bound>')
+parser.add_argument(
+    "--check_tolerance",
+    default="0.5,1.5",
+    help="Specify the tolerance when checking metrics. Format <lower_bound>,<upper_bound>",
+)
 args = parser.parse_args()
 
 #####################################################################
@@ -48,37 +61,45 @@ args = parser.parse_args()
 # Otherwise, error out
 #####################################################################
 if not isfile(args.check_csv_file):
-  logging.error("Invalid csv file to check: " + args.check_csv_file + "\nFile does not exist!\n")
-  exit(1)
+    logging.error("Invalid csv file to check: " + args.check_csv_file + "\nFile does not exist!\n")
+    exit(1)
 
 if not isfile(args.reference_csv_file):
-  logging.error("Invalid reference csv file: " + args.reference_csv_file + "\nFile does not exist!\n")
-  exit(1)
+    logging.error(
+        "Invalid reference csv file: " + args.reference_csv_file + "\nFile does not exist!\n"
+    )
+    exit(1)
 
 if not isfile(args.metric_checklist_csv_file):
-  logging.error("Invalid metric checklist csv file: " + args.metric_checklist_csv_file + "\nFile does not exist!\n")
-  exit(1)
+    logging.error(
+        "Invalid metric checklist csv file: "
+        + args.metric_checklist_csv_file
+        + "\nFile does not exist!\n"
+    )
+    exit(1)
 
 #####################################################################
 # Parse a checklist for metrics to be checked
 #####################################################################
 metric_checklist_csv_file = open(args.metric_checklist_csv_file, "r")
-metric_checklist_csv_content = csv.DictReader(filter(lambda row : row[0]!='#', metric_checklist_csv_file), delimiter=',')
+metric_checklist_csv_content = csv.DictReader(
+    filter(lambda row: row[0] != "#", metric_checklist_csv_file), delimiter=","
+)
 # Hash the reference results with the name tag
 metric_checklist = []
 for row in metric_checklist_csv_content:
-  metric_checklist.append(row[csv_metric_tag]);
+    metric_checklist.append(row[csv_metric_tag])
 
 #####################################################################
 # Parse the reference csv file
 # Skip any line start with '#' which is treated as comments
 #####################################################################
 ref_csv_file = open(args.reference_csv_file, "r")
-ref_csv_content = csv.DictReader(filter(lambda row : row[0]!='#', ref_csv_file), delimiter=',')
+ref_csv_content = csv.DictReader(filter(lambda row: row[0] != "#", ref_csv_file), delimiter=",")
 # Hash the reference results with the name tag
 ref_results = {}
 for row in ref_csv_content:
-  ref_results[row[csv_name_tag]] = row;
+    ref_results[row[csv_name_tag]] = row
 
 #####################################################################
 # Parse the tolerance to be applied when checking metrics
@@ -89,41 +110,62 @@ upper_bound_factor = float(args.check_tolerance.split(",")[1])
 #####################################################################
 # Parse the csv file to check
 #####################################################################
-with open(args.check_csv_file, newline='') as check_csv_file:
-  results_to_check = csv.DictReader(check_csv_file, delimiter=',')
-  checkpoint_count = 0
-  check_error_count = 0
-  for row in results_to_check:
-    # Start from line 1 and check information
-    for metric_to_check in metric_checklist:
-      # Check if the metric is in a range
-      if (lower_bound_factor * float(ref_results[row[csv_name_tag]][metric_to_check]) > float(row[metric_to_check])) or (upper_bound_factor * float(ref_results[row[csv_name_tag]][metric_to_check]) < float(row[metric_to_check])) :
-        # Check QoR failed, error out
-        logging.error("Benchmark " + str(row[csv_name_tag]) + " failed in checking '" + str(metric_to_check) +"'\n" + "Found: " + str(row[metric_to_check]) + " but expected: " + str(ref_results[row[csv_name_tag]][metric_to_check]) + " outside range [" + str(lower_bound_factor * 100) + "%, " + str(upper_bound_factor * 100) + "%]")
-        check_error_count += 1
-      # Pass this metric check, increase counter
-      checkpoint_count += 1
-  logging.info("Checked " + str(checkpoint_count) + " metrics")
-  logging.info("See " + str(check_error_count) + " QoR failures")
+with open(args.check_csv_file, newline="") as check_csv_file:
+    results_to_check = csv.DictReader(check_csv_file, delimiter=",")
+    checkpoint_count = 0
+    check_error_count = 0
+    for row in results_to_check:
+        # Start from line 1 and check information
+        for metric_to_check in metric_checklist:
+            # Check if the metric is in a range
+            if (
+                lower_bound_factor * float(ref_results[row[csv_name_tag]][metric_to_check])
+                > float(row[metric_to_check])
+            ) or (
+                upper_bound_factor * float(ref_results[row[csv_name_tag]][metric_to_check])
+                < float(row[metric_to_check])
+            ):
+                # Check QoR failed, error out
+                logging.error(
+                    "Benchmark "
+                    + str(row[csv_name_tag])
+                    + " failed in checking '"
+                    + str(metric_to_check)
+                    + "'\n"
+                    + "Found: "
+                    + str(row[metric_to_check])
+                    + " but expected: "
+                    + str(ref_results[row[csv_name_tag]][metric_to_check])
+                    + " outside range ["
+                    + str(lower_bound_factor * 100)
+                    + "%, "
+                    + str(upper_bound_factor * 100)
+                    + "%]"
+                )
+                check_error_count += 1
+            # Pass this metric check, increase counter
+            checkpoint_count += 1
+    logging.info("Checked " + str(checkpoint_count) + " metrics")
+    logging.info("See " + str(check_error_count) + " QoR failures")
 
-  if (0 < check_error_count):
-    exit(1)
+    if 0 < check_error_count:
+        exit(1)
 
 #####################################################################
 # Post checked results on stdout:
 # reaching here, it means all the checks have passed
 #####################################################################
-with open(args.check_csv_file, newline='') as check_csv_file:
-  results_to_check = csv.DictReader(check_csv_file, delimiter=',')
-  # Print out keywords: name + metric checklist
-  print(str(csv_name_tag) + " ", end='')
-  for metric_to_check in metric_checklist:
-    print(str(metric_to_check) + " ", end='')
-  print("")
-
-  for row in results_to_check:
-    # Start from line 1, print checked metrics
-    print(row[csv_name_tag] + " ", end='')
+with open(args.check_csv_file, newline="") as check_csv_file:
+    results_to_check = csv.DictReader(check_csv_file, delimiter=",")
+    # Print out keywords: name + metric checklist
+    print(str(csv_name_tag) + " ", end="")
     for metric_to_check in metric_checklist:
-      print(row[metric_to_check] + " ", end='')
-    print("")
+        print(str(metric_to_check) + " ", end="")
+    print("")
+
+    for row in results_to_check:
+        # Start from line 1, print checked metrics
+        print(row[csv_name_tag] + " ", end="")
+        for metric_to_check in metric_checklist:
+            print(row[metric_to_check] + " ", end="")
+        print("")
diff --git a/openfpga_flow/scripts/io_sequence_visualizer.py b/openfpga_flow/scripts/io_sequence_visualizer.py
index c71d68a08..d80acf1f2 100644
--- a/openfpga_flow/scripts/io_sequence_visualizer.py
+++ b/openfpga_flow/scripts/io_sequence_visualizer.py
@@ -33,16 +33,10 @@ def draw_connections(width, height, connections):
     dwg.add(dwg_main)
 
     for w in range(1, width + 2):
-        dwg_main.add(
-            dwg.line(
-                (w * SCALE, SCALE), (w * SCALE, (height + 1) * SCALE), stroke="red"
-            )
-        )
+        dwg_main.add(dwg.line((w * SCALE, SCALE), (w * SCALE, (height + 1) * SCALE), stroke="red"))
 
     for h in range(1, height + 2):
-        dwg_main.add(
-            dwg.line((SCALE, h * SCALE), ((width + 1) * SCALE, h * SCALE), stroke="red")
-        )
+        dwg_main.add(dwg.line((SCALE, h * SCALE), ((width + 1) * SCALE, h * SCALE), stroke="red"))
 
     path = "M "
     for point in connections:
diff --git a/openfpga_flow/scripts/run_ci_tests.py b/openfpga_flow/scripts/run_ci_tests.py
index 0e6c4e86f..9bb269273 100644
--- a/openfpga_flow/scripts/run_ci_tests.py
+++ b/openfpga_flow/scripts/run_ci_tests.py
@@ -39,63 +39,70 @@ if sys.version_info[0] < 3:
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 LOG_FORMAT = "%(levelname)5s (%(threadName)15s) - %(message)s"
 if util.find_spec("coloredlogs"):
-    coloredlogs.install(level='INFO', stream=sys.stdout,
-                        fmt=LOG_FORMAT)
+    coloredlogs.install(level="INFO", stream=sys.stdout, fmt=LOG_FORMAT)
 else:
-    logging.basicConfig(level=logging.INFO, stream=sys.stdout,
-                        format=LOG_FORMAT)
-logger = logging.getLogger('OpenFPGA_Task_logs')
+    logging.basicConfig(level=logging.INFO, stream=sys.stdout, format=LOG_FORMAT)
+logger = logging.getLogger("OpenFPGA_Task_logs")
 
 
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Read commandline arguments
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 parser = argparse.ArgumentParser()
-parser.add_argument('tasks', nargs='+')
-parser.add_argument('--maxthreads', type=int, default=2,
-                    help="Number of fpga_flow threads to run default = 2," +
-                    "Typically <= Number of processors on the system")
-parser.add_argument('--remove_run_dir', type=str,
-                    help="Remove run dir " +
-                    "'all' to remove all." +
-                    "<num>,<num> to remove specific run dir" +
-                    "<num>-<num> To remove range of directory")
-parser.add_argument('--config', help="Override default configuration")
-parser.add_argument('--test_run', action="store_true",
-                    help="Dummy run shows final generated VPR commands")
-parser.add_argument('--debug', action="store_true",
-                    help="Run script in debug mode")
-parser.add_argument('--continue_on_fail', action="store_true",
-                    help="Exit script with return code")
-parser.add_argument('--show_thread_logs', action="store_true",
-                    help="Skips logs from running thread")
+parser.add_argument("tasks", nargs="+")
+parser.add_argument(
+    "--maxthreads",
+    type=int,
+    default=2,
+    help="Number of fpga_flow threads to run, default = 2. "
+    + "Typically <= number of processors on the system",
+)
+parser.add_argument(
+    "--remove_run_dir",
+    type=str,
+    help="Remove run dir "
+    + "'all' to remove all."
+    + "<num>,<num> to remove specific run dir"
+    + "<num>-<num> To remove range of directory",
+)
+parser.add_argument("--config", help="Override default configuration")
+parser.add_argument(
+    "--test_run", action="store_true", help="Dummy run shows final generated VPR commands"
+)
+parser.add_argument("--debug", action="store_true", help="Run script in debug mode")
+parser.add_argument("--continue_on_fail", action="store_true", help="Exit script with return code")
+parser.add_argument(
+    "--show_thread_logs", action="store_true", help="Show logs from running threads"
+)
 args = parser.parse_args()
 
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Read script configuration file
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 task_script_dir = os.path.dirname(os.path.abspath(__file__))
-DESIGN_PATH_ENV=os.environ['design_path']
-DESIGN_TOP_ENV=os.environ['design_top']
-script_env_vars = ({"PATH": {
-    "DESIGN_PATH": DESIGN_PATH_ENV,
-    "DESIGN_TOP":DESIGN_TOP_ENV,
-    "OPENFPGA_FLOW_PATH": task_script_dir,
-    "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"),
-    "OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"),
-    "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"),
-    "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"),
-    "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
-    "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
-    "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir,
-                                                  os.pardir))}})
+DESIGN_PATH_ENV = os.environ["design_path"]
+DESIGN_TOP_ENV = os.environ["design_top"]
+script_env_vars = {
+    "PATH": {
+        "DESIGN_PATH": DESIGN_PATH_ENV,
+        "DESIGN_TOP": DESIGN_TOP_ENV,
+        "OPENFPGA_FLOW_PATH": task_script_dir,
+        "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"),
+        "OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"),
+        "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"),
+        "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"),
+        "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
+        "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
+        "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, os.pardir)),
+    }
+}
 config = ConfigParser(interpolation=ExtendedInterpolation())
 config.read_dict(script_env_vars)
-config.read_file(open(os.path.join(task_script_dir, 'run_fpga_task.conf')))
+config.read_file(open(os.path.join(task_script_dir, "run_fpga_task.conf")))
 gc = config["GENERAL CONFIGURATION"]
-
+
 
 def main():
     validate_command_line_arguments()
     for eachtask in args.tasks:
@@ -113,6 +120,7 @@ def main():
     logger.info("Task execution completed")
     exit(0)
 
+
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Subroutines starts here
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
@@ -155,14 +163,13 @@ def remove_run_dir():
 
     try:
         for eachdir in remove_dir:
-            logger.info('Removing run_dir %s' % (eachdir))
-            if os.path.exists('latest'):
-                if eachdir == os.readlink('latest'):
+            logger.info("Removing run_dir %s" % (eachdir))
+            if os.path.exists("latest"):
+                if eachdir == os.readlink("latest"):
                     remove_dir += ["latest"]
             shutil.rmtree(eachdir, ignore_errors=True)
     except:
-        logger.exception("Failed to remove %s run directory" %
-                         (eachdir or "Unknown"))
+        logger.exception("Failed to remove %s run directory" % (eachdir or "Unknown"))
 
 
 def generate_each_task_actions(taskname):
@@ -179,46 +186,56 @@ def generate_each_task_actions(taskname):
     elif os.path.isdir(repo_tasks):
         curr_task_dir = repo_tasks
     else:
-        clean_up_and_exit("Task directory [%s] not found" % taskname + " locally at [%s]" % local_tasks + " or in OpenFPGA task directory [%s]" % repo_tasks)
+        clean_up_and_exit(
+            "Task directory [%s] not found" % taskname
+            + " locally at [%s]" % local_tasks
+            + " or in OpenFPGA task directory [%s]" % repo_tasks
+        )
 
     os.chdir(curr_task_dir)
 
     curr_task_conf_file = os.path.join(curr_task_dir, "config", "task.conf")
     if not os.path.isfile(curr_task_conf_file):
-        clean_up_and_exit(
-            "Missing configuration file for task %s" % curr_task_dir)
+        clean_up_and_exit("Missing configuration file for task %s" % curr_task_dir)
 
     # Create run directory for current task run ./runxxx
-    run_dirs = [int(os.path.basename(x)[-3:]) for x in glob.glob('run*[0-9]')]
-    curr_run_dir = "run%03d" % (max(run_dirs+[0, ])+1)
+    run_dirs = [int(os.path.basename(x)[-3:]) for x in glob.glob("run*[0-9]")]
+    curr_run_dir = "run%03d" % (
+        max(
+            run_dirs
+            + [
+                0,
+            ]
+        )
+        + 1
+    )
 
     if args.remove_run_dir:
         remove_run_dir()
        return
 
     try:
        os.mkdir(curr_run_dir)
-        if os.path.islink('latest') or os.path.exists('latest'):
+        if os.path.islink("latest") or os.path.exists("latest"):
             os.remove("latest")
         os.symlink(curr_run_dir, "latest")
-        logger.info('Created "%s" directory for current task run' %
-                    curr_run_dir)
+        logger.info('Created "%s" directory for current task run' % curr_run_dir)
     except:
         logger.exception("")
         logger.error("Failed to create new run directory in task directory")
     os.chdir(curr_run_dir)
 
     # Read task configuration file and check consistency
-    task_conf = ConfigParser(allow_no_value=True,
-                             interpolation=ExtendedInterpolation())
-    script_env_vars['PATH']["TASK_NAME"] = "/".join(taskname)
-    script_env_vars['PATH']["TASK_DIR"] = curr_task_dir
+    task_conf = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolation())
+    script_env_vars["PATH"]["TASK_NAME"] = "/".join(taskname)
+    script_env_vars["PATH"]["TASK_DIR"] = curr_task_dir
     task_conf.read_dict(script_env_vars)
     task_conf.read_file(open(curr_task_conf_file))
 
     required_sec = ["GENERAL", "BENCHMARKS", "ARCHITECTURES"]
-    missing_section = list(set(required_sec)-set(task_conf.sections()))
+    missing_section = list(set(required_sec) - set(task_conf.sections()))
     if missing_section:
-        clean_up_and_exit("Missing sections %s" % " ".join(missing_section) +
-                          " in task configuration file")
+        clean_up_and_exit(
+            "Missing sections %s" % " ".join(missing_section) + " in task configuration file"
+        )
 
     # Declare varibles to access sections
     TaskFileSections = task_conf.sections()
@@ -233,14 +250,12 @@ def generate_each_task_actions(taskname):
         if os.path.isfile(arch_full_path):
             archfile_list.append(arch_full_path)
         else:
-            clean_up_and_exit("Architecture file not found: " +
-                              "%s " % arch_file)
+            clean_up_and_exit("Architecture file not found: " + "%s " % arch_file)
     if not len(archfile_list) == len(list(set(archfile_list))):
         clean_up_and_exit("Found duplicate architectures in config file")
 
     # Get Flow information
-    logger.info('Running "%s" flow' %
-                GeneralSection.get("fpga_flow", fallback="yosys_vpr"))
+    logger.info('Running "%s" flow' % GeneralSection.get("fpga_flow", fallback="yosys_vpr"))
 
     # Check if specified benchmark files exist
     benchmark_list = []
@@ -253,8 +268,9 @@ def generate_each_task_actions(taskname):
         for eachpath in each_benchmark.split(","):
             files = glob.glob(eachpath)
             if not len(files):
-                clean_up_and_exit(("No files added benchmark %s" % bech_name) +
-                                  " with path %s " % (eachpath))
+                clean_up_and_exit(
+                    ("No files added benchmark %s" % bech_name) + " with path %s " % (eachpath)
+                )
             bench_files += files
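The run%03d bookkeeping above derives the next run-directory index from the highest existing runNNN directory. A sketch of that arithmetic in isolation, over a made-up listing:

# Sketch of the run-directory numbering used by generate_each_task_actions;
# "existing" stands in for glob.glob("run*[0-9]").
existing = ["run001", "run002", "run007"]
run_dirs = [int(name[-3:]) for name in existing]      # [1, 2, 7]
curr_run_dir = "run%03d" % (max(run_dirs + [0]) + 1)  # the + [0] guard covers an empty listing
print(curr_run_dir)  # run008 (or run001 when no run directories exist yet)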
 
     # Read provided benchmark configurations
@@ -268,28 +284,31 @@
         # Individual benchmark configuration
         CurrBenchPara["files"] = bench_files
-        CurrBenchPara["top_module"] = SynthSection.get(bech_name+"_top",
-                                                       fallback="top")
-        CurrBenchPara["ys_script"] = SynthSection.get(bech_name+"_yosys",
-                                                      fallback=ys_for_task_common)
-        CurrBenchPara["ys_rewrite_script"] = SynthSection.get(bech_name+"_yosys_rewrite",
-                                                              fallback=ys_rewrite_for_task_common)
-        CurrBenchPara["chan_width"] = SynthSection.get(bech_name+"_chan_width",
-                                                       fallback=chan_width_common)
+        CurrBenchPara["top_module"] = SynthSection.get(bech_name + "_top", fallback="top")
+        CurrBenchPara["ys_script"] = SynthSection.get(
+            bech_name + "_yosys", fallback=ys_for_task_common
+        )
+        CurrBenchPara["ys_rewrite_script"] = SynthSection.get(
+            bech_name + "_yosys_rewrite", fallback=ys_rewrite_for_task_common
+        )
+        CurrBenchPara["chan_width"] = SynthSection.get(
+            bech_name + "_chan_width", fallback=chan_width_common
+        )
 
         if GeneralSection.get("fpga_flow") == "vpr_blif":
             # Check if activity file exist
-            if not SynthSection.get(bech_name+"_act"):
-                clean_up_and_exit("Missing argument %s" % (bech_name+"_act") +
-                                  "for vpr_blif flow")
-            CurrBenchPara["activity_file"] = SynthSection.get(bech_name+"_act")
+            if not SynthSection.get(bech_name + "_act"):
+                clean_up_and_exit(
+                    "Missing argument %s" % (bech_name + "_act") + " for vpr_blif flow"
+                )
+            CurrBenchPara["activity_file"] = SynthSection.get(bech_name + "_act")
 
             # Check if base verilog file exists
-            if not SynthSection.get(bech_name+"_verilog"):
-                clean_up_and_exit("Missing argument %s for vpr_blif flow" %
-                                  (bech_name+"_verilog"))
-            CurrBenchPara["verilog_file"] = SynthSection.get(
-                bech_name+"_verilog")
+            if not SynthSection.get(bech_name + "_verilog"):
+                clean_up_and_exit(
+                    "Missing argument %s for vpr_blif flow" % (bech_name + "_verilog")
+                )
+            CurrBenchPara["verilog_file"] = SynthSection.get(bech_name + "_verilog")
 
         # Add script parameter list in current benchmark
         ScriptSections = [x for x in TaskFileSections if "SCRIPT_PARAM" in x]
@@ -297,7 +316,7 @@
         for eachset in ScriptSections:
             command = []
             for key, values in task_conf[eachset].items():
-                command += ["--"+key, values] if values else ["--"+key]
+                command += ["--" + key, values] if values else ["--" + key]
 
             # Set label for Sript Parameters
             set_lbl = eachset.replace("SCRIPT_PARAM", "")
@@ -312,7 +331,7 @@
     # which are uniquified
     benchmark_top_module_count = []
     for bench in benchmark_list:
-        benchmark_top_module_count.append(bench["top_module"]) 
+        benchmark_top_module_count.append(bench["top_module"])
 
     # Create OpenFPGA flow run commnad for each combination of
     # architecture, benchmark and parameters
@@ -321,38 +340,49 @@
     for indx, arch in enumerate(archfile_list):
         for bench in benchmark_list:
             for lbl, param in bench["script_params"].items():
-                if (benchmark_top_module_count.count(bench["top_module"]) > 1):
-                    flow_run_dir = get_flow_rundir(arch, "bench" + str(benchmark_list.index(bench)) + "_" + bench["top_module"], lbl)
+                if benchmark_top_module_count.count(bench["top_module"]) > 1:
+                    flow_run_dir = get_flow_rundir(
+                        arch,
+                        "bench" + str(benchmark_list.index(bench)) + "_" + bench["top_module"],
+                        lbl,
+                    )
                 else:
-                    flow_run_dir = get_flow_rundir(arch, bench["top_module"], lbl)
-
+                    flow_run_dir = get_flow_rundir(arch, bench["top_module"], lbl)
+
                 command = create_run_command(
                     curr_job_dir=flow_run_dir,
                     archfile=arch,
                     benchmark_obj=bench,
                     param=param,
-                    task_conf=task_conf)
-                flow_run_cmd_list.append({
-                    "arch": arch,
-                    "bench": bench,
-                    "name": "%02d_%s_%s" % (indx, bench["top_module"], lbl),
-                    "run_dir": flow_run_dir,
-                    "commands": command,
-                    "finished": False,
-                    "status": False})
+                    task_conf=task_conf,
+                )
+                flow_run_cmd_list.append(
+                    {
+                        "arch": arch,
+                        "bench": bench,
+                        "name": "%02d_%s_%s" % (indx, bench["top_module"], lbl),
+                        "run_dir": flow_run_dir,
+                        "commands": command,
+                        "finished": False,
+                        "status": False,
+                    }
+                )
 
-    logger.info('Found %d Architectures %d Benchmarks & %d Script Parameters' %
-                (len(archfile_list), len(benchmark_list), len(ScriptSections)))
-    logger.info('Created total %d jobs' % len(flow_run_cmd_list))
+    logger.info(
+        "Found %d Architectures %d Benchmarks & %d Script Parameters"
+        % (len(archfile_list), len(benchmark_list), len(ScriptSections))
+    )
+    logger.info("Created total %d jobs" % len(flow_run_cmd_list))
 
     return flow_run_cmd_list
 
+
 # Make the directory name unique by including the benchmark index in the list.
 # This is because benchmarks may share the same top module names
 def get_flow_rundir(arch, top_module, flow_params=None):
     path = [
         os.path.basename(arch).replace(".xml", ""),
         top_module,
-        flow_params if flow_params else "common"
+        flow_params if flow_params else "common",
     ]
     return os.path.abspath(os.path.join(*path))
 
@@ -372,8 +402,8 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf):
     if os.path.isdir(curr_job_dir):
         question = "One the result directory already exist.\n"
         question += "%s\n" % curr_job_dir
-        reply = str(input(question+' (y/n): ')).lower().strip()
-        if reply[:1] in ['y', 'yes']:
+        reply = str(input(question + " (y/n): ")).lower().strip()
+        if reply[:1] in ["y", "yes"]:
             shutil.rmtree(curr_job_dir)
         else:
             logger.info("Result directory removal denied by the user")
@@ -392,8 +422,7 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf):
 
     if task_gc.get("run_engine") == "openfpga_shell":
         for eachKey in task_OFPGAc.keys():
-            command += [f"--{eachKey}",
-                        task_OFPGAc.get(f"{eachKey}")]
+            command += [f"--{eachKey}", task_OFPGAc.get(f"{eachKey}")]
     if benchmark_obj.get("activity_file"):
         command += ["--activity_file", benchmark_obj.get("activity_file")]
@@ -433,8 +462,7 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf):
 def strip_child_logger_info(line):
     try:
         logtype, message = line.split(" - ", 1)
-        lognumb = {"CRITICAL": 50, "ERROR": 40, "WARNING": 30,
-                   "INFO": 20, "DEBUG": 10, "NOTSET": 0}
+        lognumb = {"CRITICAL": 50, "ERROR": 40, "WARNING": 30, "INFO": 20, "DEBUG": 10, "NOTSET": 0}
         logger.log(lognumb[logtype.strip().upper()], message)
     except:
         logger.info(line)
@@ -446,18 +474,22 @@ def run_single_script(s, eachJob, job_list):
         eachJob["starttime"] = time.time()
         try:
             logfile = "%s_out.log" % thread_name
-            with open(logfile, 'w+') as output:
-                output.write("* "*20 + '\n')
+            with open(logfile, "w+") as output:
+                output.write("* " * 20 + "\n")
                 output.write("RunDirectory : %s\n" % os.getcwd())
-                command = [os.getenv('PYTHON_EXEC', gc["python_path"]), gc["script_default"]] + \
-                    eachJob["commands"]
-                output.write(" ".join(command) + '\n')
-                output.write("* "*20 + '\n')
+                command = [
+                    os.getenv("PYTHON_EXEC", gc["python_path"]),
+                    gc["script_default"],
+                ] + eachJob["commands"]
+                output.write(" ".join(command) + "\n")
+                output.write("* " * 20 + "\n")
                 logger.debug("Running OpenFPGA flow with [%s]" % command)
-                process = subprocess.Popen(command,
-                                           stdout=subprocess.PIPE,
-                                           stderr=subprocess.STDOUT,
-                                           universal_newlines=True)
+                process = subprocess.Popen(
+                    command,
+                    stdout=subprocess.PIPE,
+                    stderr=subprocess.STDOUT,
+                    universal_newlines=True,
+                )
                 for line in process.stdout:
                     if args.show_thread_logs:
                         strip_child_logger_info(line[:-1])
@@ -468,16 +500,16 @@ def run_single_script(s, eachJob, job_list):
                 raise subprocess.CalledProcessError(0, " ".join(command))
             eachJob["status"] = True
     except:
-        logger.exception("Failed to execute openfpga flow - " +
-                         eachJob["name"])
+        logger.exception("Failed to execute openfpga flow - " + eachJob["name"])
         if not args.continue_on_fail:
             os._exit(1)
     eachJob["endtime"] = time.time()
-    timediff = timedelta(seconds=(eachJob["endtime"]-eachJob["starttime"]))
-    timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules \
-        else str(timediff)
-    logger.info("%s Finished with returncode %d, Time Taken %s " %
-                (thread_name, process.returncode, timestr))
+    timediff = timedelta(seconds=(eachJob["endtime"] - eachJob["starttime"]))
+    timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules else str(timediff)
+    logger.info(
+        "%s Finished with returncode %d, Time Taken %s "
+        % (thread_name, process.returncode, timestr)
+    )
     eachJob["finished"] = True
     no_of_finished_job = sum([not eachJ["finished"] for eachJ in job_list])
     logger.info("***** %d runs pending *****" % (no_of_finished_job))
@@ -487,8 +519,9 @@ def run_actions(job_list):
     thread_sema = threading.Semaphore(args.maxthreads)
     thread_list = []
     for _, eachjob in enumerate(job_list):
-        t = threading.Thread(target=run_single_script, name=eachjob["name"],
-                             args=(thread_sema, eachjob, job_list))
+        t = threading.Thread(
+            target=run_single_script, name=eachjob["name"], args=(thread_sema, eachjob, job_list)
+        )
         t.start()
         thread_list.append(t)
     for eachthread in thread_list:
@@ -506,22 +539,19 @@ def collect_results(job_run_list):
             logger.info("No result files found for %s" % run["name"])
 
         # Read and merge result file
-        vpr_res = ConfigParser(allow_no_value=True,
-                               interpolation=ExtendedInterpolation())
-        vpr_res.read_file(
-            open(os.path.join(run["run_dir"], "vpr_stat.result")))
+        vpr_res = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolation())
+        vpr_res.read_file(open(os.path.join(run["run_dir"], "vpr_stat.result")))
         result = OrderedDict()
         result["name"] = run["name"]
-        result["TotalRunTime"] = int(run["endtime"]-run["starttime"])
+        result["TotalRunTime"] = int(run["endtime"] - run["starttime"])
         result.update(vpr_res["RESULTS"])
         task_result.append(result)
     colnames = []
     for eachLbl in task_result:
         colnames.extend(eachLbl.keys())
     if len(task_result):
-        with open("task_result.csv", 'w', newline='') as csvfile:
-            writer = csv.DictWriter(
-                csvfile, extrasaction='ignore', fieldnames=list(set(colnames)))
+        with open("task_result.csv", "w", newline="") as csvfile:
+            writer = csv.DictWriter(csvfile, extrasaction="ignore", fieldnames=list(set(colnames)))
             writer.writeheader()
             for eachResult in task_result:
                 writer.writerow(eachResult)
diff --git a/openfpga_flow/scripts/run_formality.py b/openfpga_flow/scripts/run_formality.py
index f1d781626..87e1099bd 100644
--- a/openfpga_flow/scripts/run_formality.py
+++ b/openfpga_flow/scripts/run_formality.py
@@ -11,26 +11,26 @@ from configparser import ConfigParser
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Configure logging system
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
-logging.basicConfig(level=logging.INFO, stream=sys.stdout,
-                    format='%(levelname)s (%(threadName)10s) - %(message)s')
-logger = logging.getLogger('Modelsim_run_log')
+logging.basicConfig(
+    level=logging.INFO, stream=sys.stdout, format="%(levelname)s (%(threadName)10s) - %(message)s"
+)
+logger = logging.getLogger("Modelsim_run_log")
 
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Parse commandline arguments
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 parser = argparse.ArgumentParser()
-parser.add_argument('files', nargs='+')
-parser.add_argument('--formality_template', type=str,
-                    help="Modelsim verification template file")
-parser.add_argument('--run_sim', action="store_true",
-                    help="Execute generated script in formality")
+parser.add_argument("files", nargs="+")
+parser.add_argument("--formality_template", type=str, help="Formality verification template file")
+parser.add_argument("--run_sim", action="store_true", help="Execute generated script in formality")
 args = parser.parse_args()
 
 # Consider default formality script template
 if not args.formality_template:
     task_script_dir = os.path.dirname(os.path.abspath(__file__))
-    args.formality_template = os.path.join(task_script_dir, os.pardir,
-                                           "misc", "formality_template.tcl")
+    args.formality_template = os.path.join(
+        task_script_dir, os.pardir, "misc", "formality_template.tcl"
+    )
 
 args.formality_template = os.path.abspath(args.formality_template)
 
@@ -44,56 +44,53 @@ def main():
         config = ConfigParser()
         config.read(eachFile)
 
-        port_map = ("set_user_match r:%s/%%s i:/WORK/%%s -type port -noninverted" % (
+        port_map = "set_user_match r:%s/%%s i:/WORK/%%s -type port -noninverted" % (
             "/WORK/" + config["BENCHMARK_INFO"]["src_top_module"]
-        ))
-        cell_map = ("set_user_match r:%s/%%s i:/WORK/%%s -type cell -noninverted" % (
+        )
+        cell_map = "set_user_match r:%s/%%s i:/WORK/%%s -type cell -noninverted" % (
             "/WORK/" + config["BENCHMARK_INFO"]["src_top_module"]
-        ))
+        )
 
         lables = {
             "SOURCE_DESIGN_FILES": config["BENCHMARK_INFO"]["benchmark_netlist"],
             "SOURCE_TOP_MODULE": "/WORK/" + config["BENCHMARK_INFO"]["src_top_module"],
-
             "IMPL_DESIGN_FILES": " ".join(
-                [val for key, val in config["FPGA_INFO"].items()
-                 if "impl_netlist_" in key]),
+                [val for key, val in config["FPGA_INFO"].items() if "impl_netlist_" in key]
+            ),
             "IMPL_TOP_DIR": "/WORK/" + config["FPGA_INFO"]["impl_top_module"],
-
-            "PORT_MAP_LIST": "\n".join([port_map %
-                                        ele for ele in
-                                        config["PORT_MATCHING"].items()]),
-            "REGISTER_MAP_LIST": "\n".join([cell_map %
-                                            ele for ele in
-                                            config["REGISTER_MATCH"].items()]),
+            "PORT_MAP_LIST": "\n".join([port_map % ele for ele in config["PORT_MATCHING"].items()]),
+            "REGISTER_MAP_LIST": "\n".join(
+                [cell_map % ele for ele in config["REGISTER_MATCH"].items()]
+            ),
         }
-        tmpl = Template(open(args.formality_template, encoding='utf-8').read())
-        with open(os.path.join(pDir, "Output.tcl"), 'w', encoding='utf-8') as tclout:
+        tmpl = Template(open(args.formality_template, encoding="utf-8").read())
+        with open(os.path.join(pDir, "Output.tcl"), "w", encoding="utf-8") as tclout:
             tclout.write(tmpl.substitute(lables))
     if args.run_sim:
         formality_run_string = ["formality", "-file", "Output.tcl"]
         run_command("Formality Run", "formality_run.log", formality_run_string)
     else:
-        with open("Output.tcl", 'r', encoding='utf-8') as tclout:
+        with open("Output.tcl", "r", encoding="utf-8") as tclout:
             print(tclout.read())
 
 
 def run_command(taskname, logfile, command, exit_if_fail=True):
     os.chdir(os.pardir)
     logger.info("Launching %s " % taskname)
-    with open(logfile, 'w+') as output:
+    with open(logfile, "w+") as output:
         try:
-            output.write(" ".join(command)+"\n")
-            process = subprocess.run(command,
-                                     check=True,
-                                     stdout=subprocess.PIPE,
-                                     stderr=subprocess.PIPE,
-                                     universal_newlines=True)
+            output.write(" ".join(command) + "\n")
+            process = subprocess.run(
+                command,
+                check=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                universal_newlines=True,
+            )
             output.write(process.stdout)
             if process.returncode:
-                logger.error("%s run failed with returncode %d" %
-                             (taskname, process.returncode))
+                logger.error("%s run failed with returncode %d" % (taskname, process.returncode))
         except (Exception, subprocess.CalledProcessError) as e:
             logger.exception("failed to execute %s" % taskname)
     return None
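The Tcl script here is produced by filling a string.Template: $-style placeholders such as SOURCE_TOP_MODULE are replaced by substitute(). A minimal sketch of that mechanism; the two-line template text is invented, not the real formality_template.tcl:

# Minimal string.Template sketch mirroring the substitution above.
from string import Template

tmpl = Template("read_sverilog -r $SOURCE_DESIGN_FILES\nset_top $SOURCE_TOP_MODULE\n")
print(tmpl.substitute({
    "SOURCE_DESIGN_FILES": "and2.v",       # made-up benchmark netlist
    "SOURCE_TOP_MODULE": "/WORK/and2",     # made-up top module path
}))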
with open("Output.tcl", 'r', encoding='utf-8') as tclout: + with open("Output.tcl", "r", encoding="utf-8") as tclout: print(tclout.read()) def run_command(taskname, logfile, command, exit_if_fail=True): os.chdir(os.pardir) logger.info("Launching %s " % taskname) - with open(logfile, 'w+') as output: + with open(logfile, "w+") as output: try: - output.write(" ".join(command)+"\n") - process = subprocess.run(command, - check=True, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) + output.write(" ".join(command) + "\n") + process = subprocess.run( + command, + check=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) output.write(process.stdout) if process.returncode: - logger.error("%s run failed with returncode %d" % - (taskname, process.returncode)) + logger.error("%s run failed with returncode %d" % (taskname, process.returncode)) except (Exception, subprocess.CalledProcessError) as e: logger.exception("failed to execute %s" % taskname) return None diff --git a/openfpga_flow/scripts/run_fpga_flow.py b/openfpga_flow/scripts/run_fpga_flow.py index 85f0399d7..3a936b9e7 100644 --- a/openfpga_flow/scripts/run_fpga_flow.py +++ b/openfpga_flow/scripts/run_fpga_flow.py @@ -27,6 +27,7 @@ from string import Template import re import xml.etree.ElementTree as ET from importlib import util + if util.find_spec("humanize"): import humanize @@ -39,24 +40,25 @@ if sys.version_info[0] < 3: # Copy directory where flow file exist flow_script_dir = os.path.dirname(os.path.abspath(__file__)) # Find OpenFPGA base directory -openfpga_base_dir = os.path.abspath( - os.path.join(flow_script_dir, os.pardir, os.pardir)) +openfpga_base_dir = os.path.abspath(os.path.join(flow_script_dir, os.pardir, os.pardir)) # Copy directory from where script is laucnhed # [req to resolve relative paths provided while launching script] launch_dir = os.getcwd() # Path section to append in configuration file to interpolate path task_script_dir = os.path.dirname(os.path.abspath(__file__)) -script_env_vars = ({"PATH": { - "OPENFPGA_FLOW_PATH": task_script_dir, - "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"), - "OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"), - "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"), - "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"), - "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"), - "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"), - "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, - os.pardir))}}) +script_env_vars = { + "PATH": { + "OPENFPGA_FLOW_PATH": task_script_dir, + "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"), + "OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"), + "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"), + "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"), + "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"), + "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"), + "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, os.pardir)), + } +} # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = # Reading command-line argument @@ -65,182 +67,259 @@ script_env_vars = ({"PATH": { # Helper function to provide better alignment to help print -def formatter(prog): return argparse.HelpFormatter(prog, 
 
 # Mandatory arguments
-parser.add_argument('arch_file', type=str)
-parser.add_argument('benchmark_files', type=str, nargs='+')
+parser.add_argument("arch_file", type=str)
+parser.add_argument("benchmark_files", type=str, nargs="+")
 # parser.add_argument('extraArgs', nargs=argparse.REMAINDER)
-parser.add_argument('otherthings', nargs='*')
+parser.add_argument("otherthings", nargs="*")
 
 # Optional arguments
-parser.add_argument('--top_module', type=str, default="top")
-parser.add_argument('--fpga_flow', type=str, default="yosys_vpr")
-parser.add_argument('--flow_config', type=str,
                    help="CAD tools path overrides default setting")
-parser.add_argument('--run_dir', type=str,
-                    default=os.path.join(openfpga_base_dir, 'tmp'),
-                    help="Directory to store intermidiate file & final results")
-parser.add_argument('--openfpga_shell_template', type=str,
-                    default=os.path.join("openfpga_flow",
-                                         "openfpga_shell_scripts",
-                                         "example_script.openfpga"),
-                    help="Sample openfpga shell script")
-parser.add_argument('--openfpga_arch_file', type=str,
-                    help="Openfpga architecture file for shell")
-parser.add_argument('--arch_variable_file', type=str, default=None,
-                    help="Openfpga architecture file for shell")
+parser.add_argument("--top_module", type=str, default="top")
+parser.add_argument("--fpga_flow", type=str, default="yosys_vpr")
+parser.add_argument("--flow_config", type=str, help="CAD tools path overrides default setting")
+parser.add_argument(
+    "--run_dir",
+    type=str,
+    default=os.path.join(openfpga_base_dir, "tmp"),
+    help="Directory to store intermediate files & final results",
+)
+parser.add_argument(
+    "--openfpga_shell_template",
+    type=str,
+    default=os.path.join("openfpga_flow", "openfpga_shell_scripts", "example_script.openfpga"),
+    help="Sample openfpga shell script",
+)
+parser.add_argument("--openfpga_arch_file", type=str, help="Openfpga architecture file for shell")
+parser.add_argument(
+    "--arch_variable_file", type=str, default=None, help="Openfpga architecture file for shell"
+)
 # parser.add_argument('--openfpga_sim_setting_file', type=str,
 #                     help="Openfpga simulation file for shell")
 # parser.add_argument('--external_fabric_key_file', type=str,
 #                     help="Key file for shell")
-parser.add_argument('--yosys_tmpl', type=str, default=None,
-                    help="Alternate yosys template, generates top_module.blif")
-parser.add_argument('--ys_rewrite_tmpl', type=str, default=None,
-                    help="Alternate yosys template, to rewrite verilog netlist")
-parser.add_argument('--verific', action="store_true",
-                    help="Run yosys with verific enabled")
-parser.add_argument('--disp', action="store_true",
-                    help="Open display while running VPR")
-parser.add_argument('--debug', action="store_true",
-                    help="Run script in debug mode")
+parser.add_argument(
+    "--yosys_tmpl",
+    type=str,
+    default=None,
+    help="Alternate yosys template, generates top_module.blif",
+)
+parser.add_argument(
+    "--ys_rewrite_tmpl",
+    type=str,
+    default=None,
+    help="Alternate yosys template, to rewrite verilog netlist",
+)
+parser.add_argument("--verific", action="store_true", help="Run yosys with verific enabled")
+parser.add_argument("--disp", action="store_true", help="Open display while running VPR")
+parser.add_argument("--debug", action="store_true", help="Run script in debug mode")
 
 # Blif_VPR Only flow arguments
-parser.add_argument('--activity_file', type=str,
-                    help="Activity file used while running yosys flow")
-parser.add_argument('--base_verilog', type=str,
-                    help="Original Verilog file to run verification in " +
-                    "blif_VPR flow")
+parser.add_argument("--activity_file", type=str, help="Activity file used while running yosys flow")
+parser.add_argument(
+    "--base_verilog",
+    type=str,
+    help="Original Verilog file to run verification in " + "blif_VPR flow",
+)
 
 # ACE2 and power estimation related arguments
-parser.add_argument('--K', type=int,
-                    help="LUT Size, if not specified extracted from arch file")
-parser.add_argument('--power', action='store_true')
-parser.add_argument('--power_tech', type=str,
-                    help="Power tech xml file for power calculation")
-parser.add_argument('--ace_d', type=float,
-                    help="Specify the default signal density of PIs in ACE2")
-parser.add_argument('--ace_p', type=float,
-                    help="Specify the default signal probablity of PIs in ACE2")
-parser.add_argument('--black_box_ace', action='store_true')
+parser.add_argument("--K", type=int, help="LUT Size, if not specified extracted from arch file")
+parser.add_argument("--power", action="store_true")
+parser.add_argument("--power_tech", type=str, help="Power tech xml file for power calculation")
+parser.add_argument("--ace_d", type=float, help="Specify the default signal density of PIs in ACE2")
+parser.add_argument(
+    "--ace_p", type=float, help="Specify the default signal probability of PIs in ACE2"
+)
+parser.add_argument("--black_box_ace", action="store_true")
 
 # VPR Options
-parser.add_argument('--min_route_chan_width', type=float,
-                    help="Turn on min_route_chan_width")
-parser.add_argument('--max_route_width_retry', type=int, default=100,
-                    help="Maximum iterations to perform to reroute")
-parser.add_argument('--fix_route_chan_width', type=int,
-                    help="Turn on fix_route_chan_width")
-parser.add_argument('--vpr_timing_pack_off', action='store_true',
-                    help="Turn off the timing-driven pack for vpr")
-parser.add_argument('--vpr_place_clb_pin_remap', action='store_true',
-                    help="Turn on place_clb_pin_remap in VPR")
-parser.add_argument('--vpr_max_router_iteration', type=int,
-                    help="Specify the max router iteration in VPR")
-parser.add_argument('--vpr_route_breadthfirst', action='store_true',
-                    help="Use the breadth-first routing algorithm of VPR")
-parser.add_argument('--vpr_use_tileable_route_chan_width', action='store_true',
-                    help="Turn on the conversion to " +
-                    "tileable_route_chan_width in VPR")
+parser.add_argument("--min_route_chan_width", type=float, help="Turn on min_route_chan_width")
+parser.add_argument(
+    "--max_route_width_retry",
+    type=int,
+    default=100,
+    help="Maximum iterations to perform to reroute",
+)
+parser.add_argument("--fix_route_chan_width", type=int, help="Turn on fix_route_chan_width")
+parser.add_argument(
+    "--vpr_timing_pack_off", action="store_true", help="Turn off the timing-driven pack for vpr"
+)
+parser.add_argument(
+    "--vpr_place_clb_pin_remap", action="store_true", help="Turn on place_clb_pin_remap in VPR"
+)
+parser.add_argument(
+    "--vpr_max_router_iteration", type=int, help="Specify the max router iteration in VPR"
+)
+parser.add_argument(
+    "--vpr_route_breadthfirst",
+    action="store_true",
+    help="Use the breadth-first routing algorithm of VPR",
+)
+parser.add_argument(
+    "--vpr_use_tileable_route_chan_width",
+    action="store_true",
+    help="Turn on the conversion to " + "tileable_route_chan_width in VPR",
+)
 
 # VPR - FPGA-X2P Extension
-X2PParse = parser.add_argument_group('VPR-FPGA-X2P Extension')
-X2PParse.add_argument('--vpr_fpga_x2p_rename_illegal_port', action='store_true',
-                      help="Rename illegal ports option of VPR FPGA SPICE")
-X2PParse.add_argument('--vpr_fpga_x2p_signal_density_weight', type=float,
-                      help="Specify the signal_density_weight of VPR FPGA SPICE")
-X2PParse.add_argument('--vpr_fpga_x2p_sim_window_size', type=float,
-                      help="specify the sim_window_size of VPR FPGA SPICE")
-X2PParse.add_argument('--vpr_fpga_x2p_compact_routing_hierarchy',
-                      action="store_true", help="Compact_routing_hierarchy")
-X2PParse.add_argument('--vpr_fpga_x2p_duplicate_grid_pin', action="store_true",
-                      help="Added duplicated grid pin")
+X2PParse = parser.add_argument_group("VPR-FPGA-X2P Extension")
+X2PParse.add_argument(
+    "--vpr_fpga_x2p_rename_illegal_port",
+    action="store_true",
+    help="Rename illegal ports option of VPR FPGA SPICE",
+)
+X2PParse.add_argument(
+    "--vpr_fpga_x2p_signal_density_weight",
+    type=float,
+    help="Specify the signal_density_weight of VPR FPGA SPICE",
+)
+X2PParse.add_argument(
+    "--vpr_fpga_x2p_sim_window_size",
+    type=float,
+    help="specify the sim_window_size of VPR FPGA SPICE",
+)
+X2PParse.add_argument(
+    "--vpr_fpga_x2p_compact_routing_hierarchy",
+    action="store_true",
+    help="Compact_routing_hierarchy",
+)
+X2PParse.add_argument(
+    "--vpr_fpga_x2p_duplicate_grid_pin", action="store_true", help="Added duplicated grid pin"
+)
 
 # VPR - FPGA-SPICE Extension
-SPParse = parser.add_argument_group('FPGA-SPICE Extension')
-SPParse.add_argument('--vpr_fpga_spice', action='store_true',
-                     help="Print SPICE netlists in VPR")
-SPParse.add_argument('--vpr_fpga_spice_sim_mt_num', type=int,
-                     help="Specify the option sim_mt_num of VPR FPGA SPICE")
-SPParse.add_argument('--vpr_fpga_spice_print_component_tb', action='store_true',
-                     help="Output component-level testbench")
-SPParse.add_argument('--vpr_fpga_spice_print_grid_tb', action='store_true',
-                     help="Output grid-level testbench")
-SPParse.add_argument('--vpr_fpga_spice_print_top_testbench', action='store_true',
-                     help="Output full-chip-level testbench")
-SPParse.add_argument('--vpr_fpga_spice_leakage_only', action='store_true',
-                     help="Turn on leakage_only mode in VPR FPGA SPICE")
-SPParse.add_argument('--vpr_fpga_spice_parasitic_net_estimation_off',
-                     action='store_true',
-                     help="Turn off parasitic_net_estimation in VPR FPGA SPICE")
-SPParse.add_argument('--vpr_fpga_spice_testbench_load_extraction_off',
-                     action='store_true',
-                     help="Turn off testbench_load_extraction in VPR FPGA SPICE")
-SPParse.add_argument('--vpr_fpga_spice_simulator_path', type=str,
-                     help="Specify simulator path")
+SPParse = parser.add_argument_group("FPGA-SPICE Extension")
+SPParse.add_argument("--vpr_fpga_spice", action="store_true", help="Print SPICE netlists in VPR")
+SPParse.add_argument(
+    "--vpr_fpga_spice_sim_mt_num", type=int, help="Specify the option sim_mt_num of VPR FPGA SPICE"
+)
+SPParse.add_argument(
+    "--vpr_fpga_spice_print_component_tb",
+    action="store_true",
+    help="Output component-level testbench",
+)
+SPParse.add_argument(
+    "--vpr_fpga_spice_print_grid_tb", action="store_true", help="Output grid-level testbench"
+)
+SPParse.add_argument(
+    "--vpr_fpga_spice_print_top_testbench",
+    action="store_true",
+    help="Output full-chip-level testbench",
+)
+SPParse.add_argument(
+    "--vpr_fpga_spice_leakage_only",
+    action="store_true",
+    help="Turn on leakage_only mode in VPR FPGA SPICE",
+)
+SPParse.add_argument(
+    "--vpr_fpga_spice_parasitic_net_estimation_off",
+    action="store_true",
+    help="Turn off parasitic_net_estimation in VPR FPGA SPICE",
+)
+SPParse.add_argument(
+    "--vpr_fpga_spice_testbench_load_extraction_off",
+    action="store_true",
+    help="Turn off testbench_load_extraction in VPR FPGA SPICE",
+)
+SPParse.add_argument("--vpr_fpga_spice_simulator_path", type=str, help="Specify simulator path")
 
 # VPR - FPGA-Verilog Extension
-VeriPar = parser.add_argument_group('FPGA-Verilog Extension')
-VeriPar.add_argument('--vpr_fpga_verilog', action='store_true',
-                     help="Generator verilog of VPR FPGA SPICE")
-VeriPar.add_argument('--vpr_fpga_verilog_dir', type=str,
-                     help="path to store generated verilog files")
-VeriPar.add_argument('--vpr_fpga_verilog_include_timing', action="store_true",
-                     help="Print delay specification in Verilog files")
-VeriPar.add_argument('--vpr_fpga_verilog_include_signal_init',
-                     action="store_true",
-                     help="Print signal initialization in Verilog files")
-VeriPar.add_argument('--vpr_fpga_verilog_print_autocheck_top_testbench',
-                     action="store_true", help="Print autochecked top-level " +
-                     "testbench for Verilog Generator of VPR FPGA SPICE")
-VeriPar.add_argument('--vpr_fpga_verilog_formal_verification_top_netlist',
-                     action="store_true", help="Print formal top Verilog files")
-VeriPar.add_argument('--vpr_fpga_verilog_include_icarus_simulator',
-                     action="store_true", help="dd syntax and definition" +
-                     " required to use Icarus Verilog simulator")
-VeriPar.add_argument('--vpr_fpga_verilog_print_user_defined_template',
-                     action="store_true", help="Unknown parameter")
-VeriPar.add_argument('--vpr_fpga_verilog_print_report_timing_tcl',
-                     action="store_true", help="Generate tcl script useful " +
-                     "for timing report generation")
-VeriPar.add_argument('--vpr_fpga_verilog_report_timing_rpt_path',
-                     type=str, help="Specify path for report timing results")
-VeriPar.add_argument('--vpr_fpga_verilog_print_sdc_pnr', action="store_true",
-                     help="Generate sdc file to constraint Hardware P&R")
-VeriPar.add_argument('--vpr_fpga_verilog_print_sdc_analysis',
-                     action="store_true", help="Generate sdc file to do STA")
-VeriPar.add_argument('--vpr_fpga_verilog_print_top_tb', action="store_true",
-                     help="Print top-level testbench for Verilog Generator " +
-                     "of VPR FPGA SPICE")
-VeriPar.add_argument('--vpr_fpga_verilog_print_input_blif_tb',
-                     action="store_true", help="Print testbench" +
-                     "for input blif file in Verilog Generator")
-VeriPar.add_argument('--vpr_fpga_verilog_print_simulation_ini', action="store_true",
-                     help="Create simulation INI file")
-VeriPar.add_argument('--vpr_fpga_verilog_explicit_mapping', action="store_true",
-                     help="Explicit Mapping")
+VeriPar = parser.add_argument_group("FPGA-Verilog Extension")
+VeriPar.add_argument(
+    "--vpr_fpga_verilog", action="store_true", help="Generate Verilog of VPR FPGA SPICE"
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_dir", type=str, help="path to store generated verilog files"
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_include_timing",
+    action="store_true",
+    help="Print delay specification in Verilog files",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_include_signal_init",
+    action="store_true",
+    help="Print signal initialization in Verilog files",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_print_autocheck_top_testbench",
+    action="store_true",
+    help="Print autochecked top-level " + "testbench for Verilog Generator of VPR FPGA SPICE",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_formal_verification_top_netlist",
+    action="store_true",
+    help="Print formal top Verilog files",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_include_icarus_simulator",
+    action="store_true",
+    help="Add syntax and definitions" + " required to use Icarus Verilog simulator",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_print_user_defined_template", action="store_true", help="Unknown parameter"
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_print_report_timing_tcl",
+    action="store_true",
+    help="Generate tcl script useful " + "for timing report generation",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_report_timing_rpt_path",
+    type=str,
+    help="Specify path for report timing results",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_print_sdc_pnr",
+    action="store_true",
+    help="Generate sdc file to constraint Hardware P&R",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_print_sdc_analysis", action="store_true", help="Generate sdc file to do STA"
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_print_top_tb",
+    action="store_true",
+    help="Print top-level testbench for Verilog Generator " + "of VPR FPGA SPICE",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_print_input_blif_tb",
+    action="store_true",
+    help="Print testbench " + "for input blif file in Verilog Generator",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_print_simulation_ini",
+    action="store_true",
+    help="Create simulation INI file",
+)
+VeriPar.add_argument(
+    "--vpr_fpga_verilog_explicit_mapping", action="store_true", help="Explicit Mapping"
+)
 
 # VPR - FPGA-Bitstream Extension
-BSparse = parser.add_argument_group('FPGA-Bitstream Extension')
-BSparse.add_argument('--vpr_fpga_bitstream_generator', action="store_true",
-                     help="Generate FPGA-SPICE bitstream")
+BSparse = parser.add_argument_group("FPGA-Bitstream Extension")
+BSparse.add_argument(
+    "--vpr_fpga_bitstream_generator", action="store_true", help="Generate FPGA-SPICE bitstream"
+)
 
 # Regression test option
-RegParse = parser.add_argument_group('Regression Test Extension')
-RegParse.add_argument("--end_flow_with_test", action="store_true",
-                      help="Run verification test at the end")
+RegParse = parser.add_argument_group("Regression Test Extension")
+RegParse.add_argument(
+    "--end_flow_with_test", action="store_true", help="Run verification test at the end"
+)
 
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Global varaibles declaration
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Setting up print and logging system
-logging.basicConfig(level=logging.INFO, stream=sys.stdout,
-                    format='%(levelname)s - %(message)s')
-logger = logging.getLogger('OpenFPGA_Flow_Logs')
+logging.basicConfig(level=logging.INFO, stream=sys.stdout, format="%(levelname)s - %(message)s")
+logger = logging.getLogger("OpenFPGA_Flow_Logs")
 
 # variable to store script_configuration and cad tool paths
 config, cad_tools = None, None
@@ -252,12 +331,12 @@ ExecTime = {}
 
 
 def main():
-    logger.debug("Script Launched in "+os.getcwd())
+    logger.debug("Script Launched in " + os.getcwd())
     check_required_file()
     read_script_config()
     validate_command_line_arguments()
     prepare_run_directory(args.run_dir)
-    if (args.fpga_flow == "yosys_vpr"):
+    if args.fpga_flow == "yosys_vpr":
         logger.info('Running "yosys_vpr" Flow')
         run_yosys_with_abc()
         # TODO Make it optional if activity file is provided
@@ -266,14 +345,14 @@ def main():
             run_pro_blif_3arg()
         else:
             # Make a copy of the blif file to be compatible with vpr flow
-            shutil.copy(args.top_module+'_yosys_out.blif', args.top_module+".blif")
+            shutil.copy(args.top_module + "_yosys_out.blif", args.top_module + ".blif")
Always Generate the post-synthesis verilog files run_rewrite_verilog() - if (args.fpga_flow == "vpr_blif"): + if args.fpga_flow == "vpr_blif": collect_files_for_vpr() - if (args.fpga_flow == "yosys"): + if args.fpga_flow == "yosys": run_yosys_with_abc() if not (args.fpga_flow == "yosys"): logger.info("Running OpenFPGA Shell Engine ") @@ -282,26 +361,34 @@ def main(): run_netlists_verification() ExecTime["End"] = time.time() - def timestr(x): return humanize.naturaldelta(timedelta(seconds=x)) \ - if "humanize" in sys.modules else str(int(x)) + " Sec " - - if (args.fpga_flow == "yosys"): - TimeInfo = ("Openfpga_flow completed, " + - "Total Time Taken %s " % - timestr(ExecTime["End"]-ExecTime["Start"])) + + def timestr(x): + return ( + humanize.naturaldelta(timedelta(seconds=x)) + if "humanize" in sys.modules + else str(int(x)) + " Sec " + ) + + if args.fpga_flow == "yosys": + TimeInfo = "Openfpga_flow completed, " + "Total Time Taken %s " % timestr( + ExecTime["End"] - ExecTime["Start"] + ) else: - TimeInfo = ("Openfpga_flow completed, " + - "Total Time Taken %s " % - timestr(ExecTime["End"]-ExecTime["Start"]) + - "VPR Time %s " % - timestr(ExecTime["VPREnd"]-ExecTime["VPRStart"])) - TimeInfo += ("Verification Time %s " % - timestr(ExecTime["VerificationEnd"] - - ExecTime["VerificationStart"]) - if args.end_flow_with_test else "") + TimeInfo = ( + "Openfpga_flow completed, " + + "Total Time Taken %s " % timestr(ExecTime["End"] - ExecTime["Start"]) + + "VPR Time %s " % timestr(ExecTime["VPREnd"] - ExecTime["VPRStart"]) + ) + TimeInfo += ( + "Verification Time %s " + % timestr(ExecTime["VerificationEnd"] - ExecTime["VerificationStart"]) + if args.end_flow_with_test + else "" + ) logger.info(TimeInfo) exit() + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = # Subroutines starts here # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = @@ -310,8 +397,9 @@ def main(): def check_required_file(): """ Function ensure existace of all required files for the script """ files_dict = { - "CAD TOOL PATH": os.path.join(flow_script_dir, os.pardir, 'misc', - 'fpgaflow_default_tool_path.conf'), + "CAD TOOL PATH": os.path.join( + flow_script_dir, os.pardir, "misc", "fpgaflow_default_tool_path.conf" + ), } for filename, filepath in files_dict.items(): if not os.path.isfile(filepath): @@ -323,8 +411,9 @@ def read_script_config(): global config, cad_tools config = ConfigParser(interpolation=ExtendedInterpolation()) config.read_dict(script_env_vars) - default_cad_tool_conf = os.path.join(flow_script_dir, os.pardir, 'misc', - 'fpgaflow_default_tool_path.conf') + default_cad_tool_conf = os.path.join( + flow_script_dir, os.pardir, "misc", "fpgaflow_default_tool_path.conf" + ) config.read_file(open(default_cad_tool_conf)) if args.flow_config: config.read_file(open(args.flow_config)) @@ -336,14 +425,17 @@ def read_script_config(): _, file_extension = os.path.splitext(args.arch_variable_file) if file_extension in [".yml", ".yaml"]: script_env_vars["PATH"].update( - EnvYAML(args.arch_variable_file, include_environment=False)) - if file_extension in [".json", ]: + EnvYAML(args.arch_variable_file, include_environment=False) + ) + if file_extension in [ + ".json", + ]: with open(args.arch_variable_file, "r") as fp: script_env_vars["PATH"].update(json.load(fp)) def validate_command_line_arguments(): - ''' + """ This function validate the command line arguments FLOW_SCRIPT_CONFIG->valid_flows : Key is used to validate if the request flow is supported by the script @@ 
-357,7 +449,7 @@ def validate_command_line_arguments(): - Run directory - Activity file - Base verilog file - ''' + """ logger.info("Validating command line arguments") if args.debug: @@ -374,46 +466,43 @@ def validate_command_line_arguments(): dependent = dependent.split(",") for eachdep in dependent: if not any([getattr(args, i, 0) for i in eachdep.split("|")]): - clean_up_and_exit("'%s' argument depends on (%s) arguments" % - (eacharg, ", ".join(dependent).replace("|", " or "))) + clean_up_and_exit( + "'%s' argument depends on (%s) arguments" + % (eacharg, ", ".join(dependent).replace("|", " or ")) + ) # Check if architecrue files exists args.arch_file = os.path.abspath(args.arch_file) if not os.path.isfile(args.arch_file): - clean_up_and_exit( - "VPR architecture file not found. -%s"% - args.arch_file) + clean_up_and_exit("VPR architecture file not found. -%s" % args.arch_file) args.openfpga_arch_file = os.path.abspath(args.openfpga_arch_file) if not os.path.isfile(args.openfpga_arch_file): - clean_up_and_exit( - "OpenFPGA architecture file not found. -%s"% - args.openfpga_arch_file) + clean_up_and_exit("OpenFPGA architecture file not found. -%s" % args.openfpga_arch_file) # Filter provided benchmark files for index, everyinput in enumerate(args.benchmark_files): args.benchmark_files[index] = os.path.abspath(everyinput) if os.path.isdir(args.benchmark_files[index]): logger.warning("Skipping directory in bench %s" % everyinput) - logger.warning("Directory is not support in benchmark list" + - "use wildcard pattern to add files") + logger.warning( + "Directory is not support in benchmark list" + "use wildcard pattern to add files" + ) continue for everyfile in glob.glob(args.benchmark_files[index]): if not os.path.isfile(everyfile): - clean_up_and_exit( - "Failed to copy benchmark file -%s" % args.arch_file) + clean_up_and_exit("Failed to copy benchmark file -%s" % args.arch_file) # Filter provided powertech files if args.power_tech: args.power_tech = os.path.abspath(args.power_tech) if not os.path.isfile(args.power_tech): - clean_up_and_exit( - "Power Tech file not found. -%s" % args.power_tech) + clean_up_and_exit("Power Tech file not found. 
-%s" % args.power_tech) # Expand run directory to absolute path args.run_dir = os.path.abspath(args.run_dir) if args.power: - if args.activity_file: - args.activity_file = os.path.abspath(args.activity_file) + if args.activity_file: + args.activity_file = os.path.abspath(args.activity_file) if args.base_verilog: args.base_verilog = os.path.abspath(args.base_verilog) @@ -435,38 +524,37 @@ def prepare_run_directory(run_dir): # Create arch dir in run_dir and copy flattened architecture file os.mkdir("arch") - tmpl = Template( - open(args.arch_file, encoding='utf-8').read()) + tmpl = Template(open(args.arch_file, encoding="utf-8").read()) arch_filename = os.path.basename(args.arch_file) args.arch_file = os.path.join(run_dir, "arch", arch_filename) - with open(args.arch_file, 'w', encoding='utf-8') as archfile: + with open(args.arch_file, "w", encoding="utf-8") as archfile: archfile.write(tmpl.safe_substitute(script_env_vars["PATH"])) - if (args.openfpga_arch_file): - tmpl = Template( - open(args.openfpga_arch_file, encoding='utf-8').read()) + if args.openfpga_arch_file: + tmpl = Template(open(args.openfpga_arch_file, encoding="utf-8").read()) arch_filename = os.path.basename(args.openfpga_arch_file) args.openfpga_arch_file = os.path.join(run_dir, "arch", arch_filename) - with open(args.openfpga_arch_file, 'w', encoding='utf-8') as archfile: + with open(args.openfpga_arch_file, "w", encoding="utf-8") as archfile: archfile.write(tmpl.safe_substitute(script_env_vars["PATH"])) # Sanitize provided openshell template, if provided - if (args.openfpga_shell_template): + if args.openfpga_shell_template: if not os.path.isfile(args.openfpga_shell_template or ""): - logger.error("Openfpga shell file - %s" % - args.openfpga_shell_template) - clean_up_and_exit("Provided openfpga_shell_template" + - f" {args.openfpga_shell_template} file not found") + logger.error("Openfpga shell file - %s" % args.openfpga_shell_template) + clean_up_and_exit( + "Provided openfpga_shell_template" + + f" {args.openfpga_shell_template} file not found" + ) else: - shutil.copy(args.openfpga_shell_template, - args.top_module+"_template.openfpga") + shutil.copy(args.openfpga_shell_template, args.top_module + "_template.openfpga") # Create benchmark dir in run_dir and copy flattern architecture file os.mkdir("benchmark") try: for index, eachfile in enumerate(args.benchmark_files): args.benchmark_files[index] = shutil.copy2( - eachfile, os.path.join(os.curdir, "benchmark")) + eachfile, os.path.join(os.curdir, "benchmark") + ) except: logger.exception("Failed to copy all benchmark file to run_dir") @@ -477,13 +565,18 @@ def clean_up_and_exit(msg, clean=False): logger.error("Exiting . . . . . 
.") exit(1) + def create_yosys_params(): tree = ET.parse(args.arch_file) root = tree.getroot() try: - lut_size = max([int(pb_type.find("input").get("num_pins")) - for pb_type in root.iter("pb_type") - if pb_type.get("class") == "lut"]) + lut_size = max( + [ + int(pb_type.find("input").get("num_pins")) + for pb_type in root.iter("pb_type") + if pb_type.get("class") == "lut" + ] + ) logger.info("Extracted lut_size size from arch XML = %s", lut_size) logger.info("Running Yosys with lut_size = %s", lut_size) except: @@ -495,11 +588,12 @@ def create_yosys_params(): for indx in range(0, len(OpenFPGAArgs), 2): tmpVar = OpenFPGAArgs[indx][2:].upper() - ys_params[tmpVar] = OpenFPGAArgs[indx+1] + ys_params[tmpVar] = OpenFPGAArgs[indx + 1] if not args.verific: - ys_params["VERILOG_FILES"] = " ".join([ - shlex.quote(eachfile) for eachfile in args.benchmark_files]) + ys_params["VERILOG_FILES"] = " ".join( + [shlex.quote(eachfile) for eachfile in args.benchmark_files] + ) if not "READ_VERILOG_OPTIONS" in ys_params: ys_params["READ_VERILOG_OPTIONS"] = "" else: @@ -520,29 +614,34 @@ def create_yosys_params(): if "VERIFIC_VHDL_STANDARD" not in ys_params: ys_params["VERIFIC_VHDL_STANDARD"] = "-vhdl" ext_to_standard_map = { - ".v" : ys_params["VERIFIC_VERILOG_STANDARD"], - ".vh" : ys_params["VERIFIC_VERILOG_STANDARD"], - ".verilog" : ys_params["VERIFIC_VERILOG_STANDARD"], - ".vlg" : ys_params["VERIFIC_VERILOG_STANDARD"], - ".sv" : ys_params["VERIFIC_SYSTEMVERILOG_STANDARD"], - ".svh" : ys_params["VERIFIC_SYSTEMVERILOG_STANDARD"], - ".vhd" : ys_params["VERIFIC_VHDL_STANDARD"], - ".vhdl" : ys_params["VERIFIC_VHDL_STANDARD"] - } + ".v": ys_params["VERIFIC_VERILOG_STANDARD"], + ".vh": ys_params["VERIFIC_VERILOG_STANDARD"], + ".verilog": ys_params["VERIFIC_VERILOG_STANDARD"], + ".vlg": ys_params["VERIFIC_VERILOG_STANDARD"], + ".sv": ys_params["VERIFIC_SYSTEMVERILOG_STANDARD"], + ".svh": ys_params["VERIFIC_SYSTEMVERILOG_STANDARD"], + ".vhd": ys_params["VERIFIC_VHDL_STANDARD"], + ".vhdl": ys_params["VERIFIC_VHDL_STANDARD"], + } lib_files = [] include_dirs = set([os.path.dirname(eachfile) for eachfile in args.benchmark_files]) if "VERIFIC_INCLUDE_DIR" in ys_params: include_dirs.update(ys_params["VERIFIC_INCLUDE_DIR"].split(",")) if include_dirs and not ys_params["ADD_INCLUDE_DIR"]: - ys_params["ADD_INCLUDE_DIR"] = "\n".join(["verific -vlog-incdir " + - shlex.quote(eachdir) for eachdir in include_dirs]) + ys_params["ADD_INCLUDE_DIR"] = "\n".join( + ["verific -vlog-incdir " + shlex.quote(eachdir) for eachdir in include_dirs] + ) if "VERIFIC_LIBRARY_DIR" in ys_params: - ys_params["ADD_LIBRARY_DIR"] = "\n".join(["verific -vlog-libdir " + - shlex.quote(eachdir) for eachdir in ys_params["VERIFIC_LIBRARY_DIR"].split(",")]) + ys_params["ADD_LIBRARY_DIR"] = "\n".join( + [ + "verific -vlog-libdir " + shlex.quote(eachdir) + for eachdir in ys_params["VERIFIC_LIBRARY_DIR"].split(",") + ] + ) try: for param, value in ys_params.items(): if param.startswith("VERIFIC_READ_LIB_NAME"): - index = param[len("VERIFIC_READ_LIB_NAME"):] + index = param[len("VERIFIC_READ_LIB_NAME") :] src_param = "VERIFIC_READ_LIB_SRC" + index if src_param in ys_params: src_files = [] @@ -555,9 +654,11 @@ def create_yosys_params(): clean_up_and_exit("Failed to locate verific library files") lib_files.extend(src_files) filename, file_extension = os.path.splitext(src_files[0]) - ys_params["READ_LIBRARY"] += " ".join(["verific -work", - ys_params[param], ext_to_standard_map[file_extension]] + - [shlex.quote(eachfile) for eachfile in src_files] + ["\n"]) + 
ys_params["READ_LIBRARY"] += " ".join( + ["verific -work", ys_params[param], ext_to_standard_map[file_extension]] + + [shlex.quote(eachfile) for eachfile in src_files] + + ["\n"] + ) standard_to_sources = {} for eachfile in args.benchmark_files: if eachfile in lib_files: @@ -568,32 +669,56 @@ def create_yosys_params(): else: standard_to_sources[ext_to_standard_map[file_extension]] = [eachfile] for standard, sources in standard_to_sources.items(): - ys_params["READ_HDL_FILE"] += " ".join(["verific", - "-L " + ys_params["VERIFIC_SEARCH_LIB"] if "VERIFIC_SEARCH_LIB" in ys_params else "", - standard, " ".join([shlex.quote(src) for src in sources]), "\n"]) + ys_params["READ_HDL_FILE"] += " ".join( + [ + "verific", + "-L " + ys_params["VERIFIC_SEARCH_LIB"] + if "VERIFIC_SEARCH_LIB" in ys_params + else "", + standard, + " ".join([shlex.quote(src) for src in sources]), + "\n", + ] + ) except: logger.exception("Failed to determine design file type") clean_up_and_exit("") if "YOSYS_CELL_SIM_VERILOG" in ys_params: - ys_params["READ_HDL_FILE"] += " ".join(["verific", - ys_params["VERIFIC_VERILOG_STANDARD"], - ys_params["YOSYS_CELL_SIM_VERILOG"], "\n"]) + ys_params["READ_HDL_FILE"] += " ".join( + [ + "verific", + ys_params["VERIFIC_VERILOG_STANDARD"], + ys_params["YOSYS_CELL_SIM_VERILOG"], + "\n", + ] + ) if "YOSYS_CELL_SIM_SYSTEMVERILOG" in ys_params: - ys_params["READ_HDL_FILE"] += " ".join(["verific", - ys_params["VERIFIC_SYSTEMVERILOG_STANDARD"], - ys_params["YOSYS_CELL_SIM_SYSTEMVERILOG"], "\n"]) + ys_params["READ_HDL_FILE"] += " ".join( + [ + "verific", + ys_params["VERIFIC_SYSTEMVERILOG_STANDARD"], + ys_params["YOSYS_CELL_SIM_SYSTEMVERILOG"], + "\n", + ] + ) if "YOSYS_CELL_SIM_VHDL" in ys_params: - ys_params["READ_HDL_FILE"] += " ".join(["verific", - ys_params["VERIFIC_VHDL_STANDARD"], - ys_params["YOSYS_CELL_SIM_VHDL"], "\n"]) + ys_params["READ_HDL_FILE"] += " ".join( + [ + "verific", + ys_params["VERIFIC_VHDL_STANDARD"], + ys_params["YOSYS_CELL_SIM_VHDL"], + "\n", + ] + ) if "YOSYS_BLACKBOX_MODULES" in ys_params: - ys_params["ADD_BLACKBOX_MODULES"] = ("blackbox " + - " ".join(["\\" + mod for mod in ys_params["YOSYS_BLACKBOX_MODULES"].split(",")])) + ys_params["ADD_BLACKBOX_MODULES"] = "blackbox " + " ".join( + ["\\" + mod for mod in ys_params["YOSYS_BLACKBOX_MODULES"].split(",")] + ) ys_params["TOP_MODULE"] = args.top_module ys_params["LUT_SIZE"] = lut_size - ys_params["OUTPUT_BLIF"] = args.top_module+"_yosys_out.blif" - ys_params["OUTPUT_VERILOG"] = args.top_module+"_output_verilog.v" + ys_params["OUTPUT_BLIF"] = args.top_module + "_yosys_out.blif" + ys_params["OUTPUT_VERILOG"] = args.top_module + "_output_verilog.v" return ys_params @@ -603,14 +728,16 @@ def run_yosys_with_abc(): Execute yosys with ABC and optional blackbox support """ ys_params = create_yosys_params() - yosys_template = args.yosys_tmpl if args.yosys_tmpl else os.path.join( - cad_tools["misc_dir"], "ys_tmpl_yosys_vpr_flow.ys") - tmpl = Template(open(yosys_template, encoding='utf-8').read()) - with open("yosys.ys", 'w') as archfile: + yosys_template = ( + args.yosys_tmpl + if args.yosys_tmpl + else os.path.join(cad_tools["misc_dir"], "ys_tmpl_yosys_vpr_flow.ys") + ) + tmpl = Template(open(yosys_template, encoding="utf-8").read()) + with open("yosys.ys", "w") as archfile: archfile.write(tmpl.safe_substitute(ys_params)) - run_command("Run yosys", "yosys_output.log", - [cad_tools["yosys_path"], 'yosys.ys']) + run_command("Run yosys", "yosys_output.log", [cad_tools["yosys_path"], "yosys.ys"]) def run_odin2(): @@ -627,20 +754,20 
@@ def run_abc_for_standarad():

 def run_ace2():
     if args.black_box_ace:
-        with open(args.top_module+'_yosys_out.blif', 'r') as fp:
+        with open(args.top_module + "_yosys_out.blif", "r") as fp:
             blif_lines = fp.readlines()

-        with open(args.top_module+'_bb.blif', 'w') as fp:
+        with open(args.top_module + "_bb.blif", "w") as fp:
             for eachline in blif_lines:
                 if ".names" in eachline:
                     input_nets = eachline.split()[1:]
-                    if len(input_nets)-1 > args.K:
-                        logger.error("One module in blif have more inputs" +
-                                     " than K value")
+                    if len(input_nets) - 1 > args.K:
+                        logger.error("One module in blif has more inputs" + " than K value")
                     # Map CEll to each logic in blif
-                    map_nets = (input_nets[:-1] + ["unconn"]*args.K)[:args.K]
-                    map_nets = ["I[%d]=%s" % (indx, eachnet)
-                                for indx, eachnet in enumerate(map_nets)]
+                    map_nets = (input_nets[:-1] + ["unconn"] * args.K)[: args.K]
+                    map_nets = [
+                        "I[%d]=%s" % (indx, eachnet) for indx, eachnet in enumerate(map_nets)
+                    ]
                     map_nets += ["O[0]=%s\n" % input_nets[-1]]
                     fp.write(".subckt CELL ")
                     fp.write(" ".join(map_nets))
@@ -648,35 +775,43 @@ def run_ace2():
                 fp.write(eachline)

             declar_input = " ".join(["I[%d]" % i for i in range(args.K)])
-            model_tmpl = "\n" + \
-                ".model CELL\n" + \
-                ".inputs " + declar_input + " \n" + \
-                ".outputs O[0]\n" + \
-                ".blackbox\n" + \
-                ".end\n"
+            model_tmpl = (
+                "\n"
+                + ".model CELL\n"
+                + ".inputs "
+                + declar_input
+                + " \n"
+                + ".outputs O[0]\n"
+                + ".blackbox\n"
+                + ".end\n"
+            )
             fp.write(model_tmpl)

     # Prepare ACE run command
     command = [
-        "-b", args.top_module +
-        ('_bb.blif' if args.black_box_ace else "_yosys_out.blif"),
-        "-o", args.top_module+"_ace_out.act",
-        "-n", args.top_module+"_ace_out.blif",
-        "-c", "clk",
+        "-b",
+        args.top_module + ("_bb.blif" if args.black_box_ace else "_yosys_out.blif"),
+        "-o",
+        args.top_module + "_ace_out.act",
+        "-n",
+        args.top_module + "_ace_out.blif",
+        "-c",
+        "clk",
     ]
     command += ["-d", "%.4f" % args.ace_d] if args.ace_d else []
-    command += ["-p", "%.4f" % args.ace_d] if args.ace_p else [""]
+    command += ["-p", "%.4f" % args.ace_p] if args.ace_p else []
     try:
-        filename = args.top_module + '_ace2_output.txt'
-        with open(filename, 'w+') as output:
-            process = subprocess.run([cad_tools["ace_path"]] + command,
-                                     check=True,
-                                     stdout=subprocess.PIPE,
-                                     stderr=subprocess.PIPE,
-                                     universal_newlines=True)
+        filename = args.top_module + "_ace2_output.txt"
+        with open(filename, "w+") as output:
+            process = subprocess.run(
+                [cad_tools["ace_path"]] + command,
+                check=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                universal_newlines=True,
+            )
             output.write(process.stdout)
             if process.returncode:
-                logger.info("ACE2 failed with returncode %d",
-                            process.returncode)
+                logger.info("ACE2 failed with returncode %d", process.returncode)
                 raise subprocess.CalledProcessError(0, command)
     except:
         logger.exception("Failed to run ACE2")
@@ -686,23 +821,26 @@ def run_ace2():

 def run_pro_blif_3arg():
     command = [
-        "-i", args.top_module+"_ace_out.blif",
-        "-o", args.top_module+".blif",
-        "-initial_blif", args.top_module+'_yosys_out.blif',
+        "-i",
+        args.top_module + "_ace_out.blif",
+        "-o",
+        args.top_module + ".blif",
+        "-initial_blif",
+        args.top_module + "_yosys_out.blif",
     ]
     try:
-        filename = args.top_module+'_blif_3args_output.txt'
-        with open(filename, 'w+') as output:
-            process = subprocess.run(["perl", cad_tools["pro_blif_path"]] +
-                                     command,
-                                     check=True,
-                                     stdout=subprocess.PIPE,
-                                     stderr=subprocess.PIPE,
-                                     universal_newlines=True)
+        filename = args.top_module + "_blif_3args_output.txt"
+        with open(filename, "w+") as output:
+            process = subprocess.run(
+                ["perl", cad_tools["pro_blif_path"]] + command,
+                check=True,
+                stdout=subprocess.PIPE,
+                stderr=subprocess.PIPE,
+                universal_newlines=True,
+            )
             output.write(process.stdout)
             if process.returncode:
-                logger.info("blif_3args script failed with returncode %d",
-                            process.returncode)
+                logger.info("blif_3args script failed with returncode %d", process.returncode)
     except:
         logger.exception("Failed to run blif_3args")
         clean_up_and_exit("")
@@ -715,56 +853,54 @@ def collect_files_for_vpr():
         logger.error("Expecting Single Benchmark Blif file.")
     if not os.path.isfile(args.benchmark_files[0] or ""):
         clean_up_and_exit("Provided Blif file not found")
-    shutil.copy(args.benchmark_files[0], args.top_module+".blif")
+    shutil.copy(args.benchmark_files[0], args.top_module + ".blif")

     # Sanitize provided Activity file option
     if args.power:
-       if not os.path.isfile(args.activity_file or ""):
-           logger.error("Activity File - %s" % args.activity_file)
-           clean_up_and_exit("Provided activity file not found")
-       shutil.copy(args.activity_file, args.top_module+"_ace_out.act")
+        if not os.path.isfile(args.activity_file or ""):
+            logger.error("Activity File - %s" % args.activity_file)
+            clean_up_and_exit("Provided activity file not found")
+        shutil.copy(args.activity_file, args.top_module + "_ace_out.act")
     else:
-       if os.path.isfile(args.activity_file):
-           shutil.copy(args.activity_file, args.top_module+"_ace_out.act")
+        if os.path.isfile(args.activity_file):
+            shutil.copy(args.activity_file, args.top_module + "_ace_out.act")

     # Sanitize provided Benchmark option
     if not os.path.isfile(args.base_verilog or ""):
         logger.error("Base Verilog File - %s" % args.base_verilog)
         clean_up_and_exit("Provided base_verilog file not found")
-    shutil.copy(args.base_verilog, args.top_module+"_output_verilog.v")
+    shutil.copy(args.base_verilog, args.top_module + "_output_verilog.v")


 def run_openfpga_shell():
     ExecTime["VPRStart"] = time.time()
     # bench_blif, fixed_chan_width, logfile, route_only=False
-    tmpl = Template(open(args.top_module+"_template.openfpga",
-                         encoding='utf-8').read())
+    tmpl = Template(open(args.top_module + "_template.openfpga", encoding="utf-8").read())

     path_variables = script_env_vars["PATH"]
     path_variables["TOP_MODULE"] = args.top_module
     path_variables["VPR_ARCH_FILE"] = args.arch_file
     path_variables["OPENFPGA_ARCH_FILE"] = args.openfpga_arch_file
-    path_variables["VPR_TESTBENCH_BLIF"] = args.top_module+".blif"
-    path_variables["ACTIVITY_FILE"] = args.top_module+"_ace_out.act"
-    path_variables["REFERENCE_VERILOG_TESTBENCH"] = args.top_module + \
-        "_output_verilog.v"
+    path_variables["VPR_TESTBENCH_BLIF"] = args.top_module + ".blif"
+    path_variables["ACTIVITY_FILE"] = args.top_module + "_ace_out.act"
+    path_variables["REFERENCE_VERILOG_TESTBENCH"] = args.top_module + "_output_verilog.v"

     for indx in range(0, len(OpenFPGAArgs), 2):
         tmpVar = OpenFPGAArgs[indx][2:].upper()
-        path_variables[tmpVar] = OpenFPGAArgs[indx+1]
+        path_variables[tmpVar] = OpenFPGAArgs[indx + 1]

-    with open(args.top_module+"_run.openfpga", 'w', encoding='utf-8') as archfile:
+    with open(args.top_module + "_run.openfpga", "w", encoding="utf-8") as archfile:
         archfile.write(tmpl.safe_substitute(path_variables))
-    command = [cad_tools["openfpga_shell_path"], "-batch", "-f",
-               args.top_module+"_run.openfpga"]
+    command = [cad_tools["openfpga_shell_path"], "-batch", "-f", args.top_module + "_run.openfpga"]
     run_command("OpenFPGA Shell Run", "openfpgashell.log", command)
     ExecTime["VPREnd"] = time.time()
     extract_vpr_stats("openfpgashell.log")


 def extract_vpr_stats(logfile, r_filename="vpr_stat", parse_section="vpr"):
-    section = "DEFAULT_PARSE_RESULT_POWER" if parse_section == "power" \
-        else "DEFAULT_PARSE_RESULT_VPR"
+    section = (
+        "DEFAULT_PARSE_RESULT_POWER" if parse_section == "power" else "DEFAULT_PARSE_RESULT_VPR"
+    )
     vpr_log = open(logfile).read()
     resultDict = {}
args.top_module+"_top_formal_verification_random_tb" - tb_top_autochecked = args.top_module+"_autocheck_top_tb" + tb_top_formal = args.top_module + "_top_formal_verification_random_tb" + tb_top_autochecked = args.top_module + "_autocheck_top_tb" # netlists_path = args.vpr_fpga_verilog_dir_val+"/SRC/" command = [cad_tools["iverilog_path"]] @@ -866,21 +1009,19 @@ def run_netlists_verification(exit_if_fail=True): def run_command(taskname, logfile, command, exit_if_fail=True): logger.info("Launching %s " % taskname) - with open(logfile, 'w') as output: + with open(logfile, "w") as output: try: - output.write(" ".join(command)+"\n") - process = subprocess.run(command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) + output.write(" ".join(command) + "\n") + process = subprocess.run( + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True + ) output.write(process.stdout) output.write(process.stderr) output.write(str(process.returncode)) if "openfpgashell" in logfile: filter_openfpga_output(process.stdout) if process.returncode: - logger.error("%s run failed with returncode %d" % - (taskname, process.returncode)) + logger.error("%s run failed with returncode %d" % (taskname, process.returncode)) logger.error("command %s" % " ".join(command)) filter_failed_process_output(process.stderr) if exit_if_fail: @@ -899,8 +1040,7 @@ def filter_openfpga_output(vpr_output): try: for i in range(50): if "Version:" in next(stdout): - logger.info("OpenFPGAShell %s %s" % - (next(stdout), next(stdout))) + logger.info("OpenFPGAShell %s %s" % (next(stdout), next(stdout))) break except StopIteration: pass diff --git a/openfpga_flow/scripts/run_fpga_task.py b/openfpga_flow/scripts/run_fpga_task.py index 7cf4aa83d..093d32db3 100644 --- a/openfpga_flow/scripts/run_fpga_task.py +++ b/openfpga_flow/scripts/run_fpga_task.py @@ -39,55 +39,62 @@ if sys.version_info[0] < 3: # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = LOG_FORMAT = "%(levelname)5s (%(threadName)15s) - %(message)s" if util.find_spec("coloredlogs"): - coloredlogs.install(level='INFO', stream=sys.stdout, - fmt=LOG_FORMAT) + coloredlogs.install(level="INFO", stream=sys.stdout, fmt=LOG_FORMAT) else: - logging.basicConfig(level=logging.INFO, stream=sys.stdout, - format=LOG_FORMAT) -logger = logging.getLogger('OpenFPGA_Task_logs') + logging.basicConfig(level=logging.INFO, stream=sys.stdout, format=LOG_FORMAT) +logger = logging.getLogger("OpenFPGA_Task_logs") # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = # Read commandline arguments # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = parser = argparse.ArgumentParser() -parser.add_argument('tasks', nargs='+') -parser.add_argument('--maxthreads', type=int, default=2, - help="Number of fpga_flow threads to run default = 2," + - "Typically <= Number of processors on the system") -parser.add_argument('--remove_run_dir', type=str, - help="Remove run dir " + - "'all' to remove all." 
+ - ", to remove specific run dir" + - "- To remove range of directory") -parser.add_argument('--config', help="Override default configuration") -parser.add_argument('--test_run', action="store_true", - help="Dummy run shows final generated VPR commands") -parser.add_argument('--debug', action="store_true", - help="Run script in debug mode") -parser.add_argument('--continue_on_fail', action="store_true", - help="Exit script with return code") -parser.add_argument('--show_thread_logs', action="store_true", - help="Skips logs from running thread") +parser.add_argument("tasks", nargs="+") +parser.add_argument( + "--maxthreads", + type=int, + default=2, + help="Number of fpga_flow threads to run default = 2," + + "Typically <= Number of processors on the system", +) +parser.add_argument( + "--remove_run_dir", + type=str, + help="Remove run dir " + + "'all' to remove all." + + ", to remove specific run dir" + + "- To remove range of directory", +) +parser.add_argument("--config", help="Override default configuration") +parser.add_argument( + "--test_run", action="store_true", help="Dummy run shows final generated VPR commands" +) +parser.add_argument("--debug", action="store_true", help="Run script in debug mode") +parser.add_argument("--continue_on_fail", action="store_true", help="Exit script with return code") +parser.add_argument( + "--show_thread_logs", action="store_true", help="Skips logs from running thread" +) args = parser.parse_args() # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = # Read script configuration file # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = task_script_dir = os.path.dirname(os.path.abspath(__file__)) -script_env_vars = ({"PATH": { - "OPENFPGA_FLOW_PATH": task_script_dir, - "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"), - "OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"), - "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"), - "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"), - "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"), - "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"), - "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, - os.pardir))}}) +script_env_vars = { + "PATH": { + "OPENFPGA_FLOW_PATH": task_script_dir, + "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"), + "OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"), + "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"), + "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"), + "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"), + "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"), + "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, os.pardir)), + } +} config = ConfigParser(interpolation=ExtendedInterpolation()) config.read_dict(script_env_vars) -config.read_file(open(os.path.join(task_script_dir, 'run_fpga_task.conf'))) +config.read_file(open(os.path.join(task_script_dir, "run_fpga_task.conf"))) gc = config["GENERAL CONFIGURATION"] @@ -109,6 +116,7 @@ def main(): logger.info("Task execution completed") exit(0) + # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = # Subroutines starts here # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = @@ -151,14 +159,13 @@ def remove_run_dir(): try: for eachdir 
in remove_dir: - logger.info('Removing run_dir %s' % (eachdir)) - if os.path.exists('latest'): - if eachdir == os.readlink('latest'): + logger.info("Removing run_dir %s" % (eachdir)) + if os.path.exists("latest"): + if eachdir == os.readlink("latest"): remove_dir += ["latest"] shutil.rmtree(eachdir, ignore_errors=True) except: - logger.exception("Failed to remove %s run directory" % - (eachdir or "Unknown")) + logger.exception("Failed to remove %s run directory" % (eachdir or "Unknown")) def generate_each_task_actions(taskname): @@ -169,7 +176,7 @@ def generate_each_task_actions(taskname): # Check if task directory exists and consistent local_tasks = os.path.join(*(taskname)) repo_tasks = os.path.join(gc["task_dir"], *(taskname)) - abs_tasks = os.path.abspath('/' + local_tasks) + abs_tasks = os.path.abspath("/" + local_tasks) if os.path.isdir(local_tasks): os.chdir(local_tasks) curr_task_dir = os.path.abspath(os.getcwd()) @@ -178,52 +185,60 @@ def generate_each_task_actions(taskname): elif os.path.isdir(repo_tasks): curr_task_dir = repo_tasks else: - clean_up_and_exit("Task directory [%s] not found" % taskname + - " locally at [%s]" % local_tasks + - ", absolutely at [%s]" % abs_tasks + - ", or in OpenFPGA task directory [%s]" % repo_tasks) + clean_up_and_exit( + "Task directory [%s] not found" % taskname + + " locally at [%s]" % local_tasks + + ", absolutely at [%s]" % abs_tasks + + ", or in OpenFPGA task directory [%s]" % repo_tasks + ) os.chdir(curr_task_dir) curr_task_conf_file = os.path.join(curr_task_dir, "config", "task.conf") if not os.path.isfile(curr_task_conf_file): - clean_up_and_exit( - "Missing configuration file for task %s" % curr_task_dir) + clean_up_and_exit("Missing configuration file for task %s" % curr_task_dir) if args.remove_run_dir: remove_run_dir() flow_run_cmd_list = [] GeneralSection = [] - return flow_run_cmd_list,GeneralSection + return flow_run_cmd_list, GeneralSection # Create run directory for current task run ./runxxx - run_dirs = [int(os.path.basename(x)[-3:]) for x in glob.glob('run*[0-9]')] - curr_run_dir = "run%03d" % (max(run_dirs+[0, ])+1) + run_dirs = [int(os.path.basename(x)[-3:]) for x in glob.glob("run*[0-9]")] + curr_run_dir = "run%03d" % ( + max( + run_dirs + + [ + 0, + ] + ) + + 1 + ) try: os.mkdir(curr_run_dir) - if os.path.islink('latest') or os.path.exists('latest'): + if os.path.islink("latest") or os.path.exists("latest"): os.remove("latest") os.symlink(curr_run_dir, "latest") - logger.info('Created "%s" directory for current task run' % - curr_run_dir) + logger.info('Created "%s" directory for current task run' % curr_run_dir) except: logger.exception("") logger.error("Failed to create new run directory in task directory") os.chdir(curr_run_dir) # Read task configuration file and check consistency - task_conf = ConfigParser(allow_no_value=True, - interpolation=ExtendedInterpolation()) - script_env_vars['PATH']["TASK_NAME"] = "/".join(taskname) - script_env_vars['PATH']["TASK_DIR"] = curr_task_dir + task_conf = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolation()) + script_env_vars["PATH"]["TASK_NAME"] = "/".join(taskname) + script_env_vars["PATH"]["TASK_DIR"] = curr_task_dir task_conf.read_dict(script_env_vars) task_conf.read_file(open(curr_task_conf_file)) required_sec = ["GENERAL", "BENCHMARKS", "ARCHITECTURES"] - missing_section = list(set(required_sec)-set(task_conf.sections())) + missing_section = list(set(required_sec) - set(task_conf.sections())) if missing_section: - clean_up_and_exit("Missing sections %s" % " 
".join(missing_section) + - " in task configuration file") + clean_up_and_exit( + "Missing sections %s" % " ".join(missing_section) + " in task configuration file" + ) # Declare varibles to access sections TaskFileSections = task_conf.sections() @@ -238,14 +253,12 @@ def generate_each_task_actions(taskname): if os.path.isfile(arch_full_path): archfile_list.append(arch_full_path) else: - clean_up_and_exit("Architecture file not found: " + - "%s " % arch_file) + clean_up_and_exit("Architecture file not found: " + "%s " % arch_file) if not len(archfile_list) == len(list(set(archfile_list))): clean_up_and_exit("Found duplicate architectures in config file") # Get Flow information - logger.info('Running "%s" flow', - GeneralSection.get("fpga_flow", fallback="yosys_vpr")) + logger.info('Running "%s" flow', GeneralSection.get("fpga_flow", fallback="yosys_vpr")) # Check if specified benchmark files exist benchmark_list = [] @@ -258,8 +271,9 @@ def generate_each_task_actions(taskname): for eachpath in each_benchmark.split(","): files = glob.glob(eachpath) if not len(files): - clean_up_and_exit(("No files added benchmark %s" % bech_name) + - " with path %s " % (eachpath)) + clean_up_and_exit( + ("No files added benchmark %s" % bech_name) + " with path %s " % (eachpath) + ) bench_files += files # Read provided benchmark configurations @@ -289,53 +303,56 @@ def generate_each_task_actions(taskname): "verific_vhdl_standard", "verific_include_dir", "verific_library_dir", - "verific_search_lib" + "verific_search_lib", ] yosys_params_common = {} for param in yosys_params: - yosys_params_common[param.upper()] = SynthSection.get("bench_"+param+"_common") + yosys_params_common[param.upper()] = SynthSection.get("bench_" + param + "_common") # Individual benchmark configuration CurrBenchPara["files"] = bench_files - CurrBenchPara["top_module"] = SynthSection.get(bech_name+"_top", - fallback="top") - CurrBenchPara["ys_script"] = SynthSection.get(bech_name+"_yosys", - fallback=ys_for_task_common) - CurrBenchPara["ys_rewrite_script"] = SynthSection.get(bech_name+"_yosys_rewrite", - fallback=ys_rewrite_for_task_common) - CurrBenchPara["chan_width"] = SynthSection.get(bech_name+"_chan_width", - fallback=chan_width_common) + CurrBenchPara["top_module"] = SynthSection.get(bech_name + "_top", fallback="top") + CurrBenchPara["ys_script"] = SynthSection.get( + bech_name + "_yosys", fallback=ys_for_task_common + ) + CurrBenchPara["ys_rewrite_script"] = SynthSection.get( + bech_name + "_yosys_rewrite", fallback=ys_rewrite_for_task_common + ) + CurrBenchPara["chan_width"] = SynthSection.get( + bech_name + "_chan_width", fallback=chan_width_common + ) CurrBenchPara["benchVariable"] = [] for eachKey, eachValue in SynthSection.items(): if bech_name in eachKey: - eachKey = eachKey.replace(bech_name+"_", "").upper() + eachKey = eachKey.replace(bech_name + "_", "").upper() CurrBenchPara["benchVariable"] += [f"--{eachKey}", eachValue] - + for param, value in yosys_params_common.items(): if not param in CurrBenchPara["benchVariable"] and value: CurrBenchPara["benchVariable"] += [f"--{param}", value] if GeneralSection.get("fpga_flow") == "vpr_blif": # Check if activity file exist only when power analysis is required - if (GeneralSection.getboolean("power_analysis")): - if not SynthSection.get(bech_name+"_act"): - clean_up_and_exit("Missing argument %s" % (bech_name+"_act") + - "for vpr_blif flow") - CurrBenchPara["activity_file"] = SynthSection.get(bech_name+"_act") + if GeneralSection.getboolean("power_analysis"): + if not 
SynthSection.get(bech_name + "_act"): + clean_up_and_exit( + "Missing argument %s" % (bech_name + "_act") + "for vpr_blif flow" + ) + CurrBenchPara["activity_file"] = SynthSection.get(bech_name + "_act") else: # If users defined an activity file, we use it otherwise create a dummy act - if not SynthSection.get(bech_name+"_act"): - CurrBenchPara["activity_file"] = bech_name+"_act" + if not SynthSection.get(bech_name + "_act"): + CurrBenchPara["activity_file"] = bech_name + "_act" else: - CurrBenchPara["activity_file"] = SynthSection.get(bech_name+"_act") + CurrBenchPara["activity_file"] = SynthSection.get(bech_name + "_act") # Check if base verilog file exists - if not SynthSection.get(bech_name+"_verilog"): - clean_up_and_exit("Missing argument %s for vpr_blif flow" % - (bech_name+"_verilog")) - CurrBenchPara["verilog_file"] = SynthSection.get( - bech_name+"_verilog") + if not SynthSection.get(bech_name + "_verilog"): + clean_up_and_exit( + "Missing argument %s for vpr_blif flow" % (bech_name + "_verilog") + ) + CurrBenchPara["verilog_file"] = SynthSection.get(bech_name + "_verilog") # Add script parameter list in current benchmark ScriptSections = [x for x in TaskFileSections if "SCRIPT_PARAM" in x] @@ -343,7 +360,7 @@ def generate_each_task_actions(taskname): for eachset in ScriptSections: command = [] for key, values in task_conf[eachset].items(): - command += ["--"+key, values] if values else ["--"+key] + command += ["--" + key, values] if values else ["--" + key] # Set label for Sript Parameters set_lbl = eachset.replace("SCRIPT_PARAM", "") @@ -358,7 +375,7 @@ def generate_each_task_actions(taskname): # which are uniquified benchmark_top_module_count = [] for bench in benchmark_list: - benchmark_top_module_count.append(bench["top_module"]) + benchmark_top_module_count.append(bench["top_module"]) # Create OpenFPGA flow run commnad for each combination of # architecture, benchmark and parameters @@ -367,31 +384,42 @@ def generate_each_task_actions(taskname): for indx, arch in enumerate(archfile_list): for bench in benchmark_list: for lbl, param in bench["script_params"].items(): - if (benchmark_top_module_count.count(bench["top_module"]) > 1): - flow_run_dir = get_flow_rundir(arch, "bench" + str(benchmark_list.index(bench)) + "_" + bench["top_module"], lbl) + if benchmark_top_module_count.count(bench["top_module"]) > 1: + flow_run_dir = get_flow_rundir( + arch, + "bench" + str(benchmark_list.index(bench)) + "_" + bench["top_module"], + lbl, + ) else: - flow_run_dir = get_flow_rundir(arch, bench["top_module"], lbl) + flow_run_dir = get_flow_rundir(arch, bench["top_module"], lbl) command = create_run_command( curr_job_dir=flow_run_dir, archfile=arch, benchmark_obj=bench, param=param, - task_conf=task_conf) - flow_run_cmd_list.append({ - "arch": arch, - "bench": bench, - "name": "%02d_%s_%s" % (indx, bench["top_module"], lbl), - "run_dir": flow_run_dir, - "commands": command + bench["benchVariable"], - "finished": False, - "status": False}) + task_conf=task_conf, + ) + flow_run_cmd_list.append( + { + "arch": arch, + "bench": bench, + "name": "%02d_%s_%s" % (indx, bench["top_module"], lbl), + "run_dir": flow_run_dir, + "commands": command + bench["benchVariable"], + "finished": False, + "status": False, + } + ) - logger.info('Found %d Architectures %d Benchmarks & %d Script Parameters' % - (len(archfile_list), len(benchmark_list), len(ScriptSections))) - logger.info('Created total %d jobs' % len(flow_run_cmd_list)) + logger.info( + "Found %d Architectures %d Benchmarks & %d Script 
Parameters" + % (len(archfile_list), len(benchmark_list), len(ScriptSections)) + ) + logger.info("Created total %d jobs" % len(flow_run_cmd_list)) + + return flow_run_cmd_list, GeneralSection - return flow_run_cmd_list,GeneralSection # Make the directory name unique by including the benchmark index in the list. # This is because benchmarks may share the same top module names @@ -401,7 +429,7 @@ def get_flow_rundir(arch, top_module, flow_params=None): path = [ os.path.basename(arch).replace(".xml", ""), top_module, - flow_params if flow_params else "common" + flow_params if flow_params else "common", ] return os.path.abspath(os.path.join(*path)) @@ -421,8 +449,8 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf): if os.path.isdir(curr_job_dir): question = "One the result directory already exist.\n" question += "%s\n" % curr_job_dir - reply = str(input(question+' (y/n): ')).lower().strip() - if reply[:1] in ['y', 'yes']: + reply = str(input(question + " (y/n): ")).lower().strip() + if reply[:1] in ["y", "yes"]: shutil.rmtree(curr_job_dir) else: logger.info("Result directory removal denied by the user") @@ -444,8 +472,7 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf): if task_gc.get("run_engine") == "openfpga_shell": for eachKey in task_OFPGAc.keys(): - command += [f"--{eachKey}", - task_OFPGAc.get(f"{eachKey}")] + command += [f"--{eachKey}", task_OFPGAc.get(f"{eachKey}")] if benchmark_obj.get("activity_file"): command += ["--activity_file", benchmark_obj.get("activity_file")] @@ -485,8 +512,7 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf): def strip_child_logger_info(line): try: logtype, message = line.split(" - ", 1) - lognumb = {"CRITICAL": 50, "ERROR": 40, "WARNING": 30, - "INFO": 20, "DEBUG": 10, "NOTSET": 0} + lognumb = {"CRITICAL": 50, "ERROR": 40, "WARNING": 30, "INFO": 20, "DEBUG": 10, "NOTSET": 0} logger.log(lognumb[logtype.strip().upper()], message) except: logger.info(line) @@ -498,18 +524,22 @@ def run_single_script(s, eachJob, job_list): eachJob["starttime"] = time.time() try: logfile = "%s_out.log" % thread_name - with open(logfile, 'w+') as output: - output.write("* "*20 + '\n') + with open(logfile, "w+") as output: + output.write("* " * 20 + "\n") output.write("RunDirectory : %s\n" % os.getcwd()) - command = [os.getenv('PYTHON_EXEC', gc["python_path"]), gc["script_default"]] + \ - eachJob["commands"] - output.write(" ".join(command) + '\n') - output.write("* "*20 + '\n') + command = [ + os.getenv("PYTHON_EXEC", gc["python_path"]), + gc["script_default"], + ] + eachJob["commands"] + output.write(" ".join(command) + "\n") + output.write("* " * 20 + "\n") logger.debug("Running OpenFPGA flow with [%s]" % command) - process = subprocess.Popen(command, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + process = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) for line in process.stdout: if args.show_thread_logs: strip_child_logger_info(line[:-1]) @@ -520,27 +550,30 @@ def run_single_script(s, eachJob, job_list): raise subprocess.CalledProcessError(0, " ".join(command)) eachJob["status"] = True except: - logger.exception("Failed to execute openfpga flow - %s", - eachJob["name"]) + logger.exception("Failed to execute openfpga flow - %s", eachJob["name"]) if not args.continue_on_fail: os._exit(1) eachJob["endtime"] = time.time() - timediff = 
timedelta(seconds=(eachJob["endtime"]-eachJob["starttime"])) - timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules \ - else str(timediff) - logger.info("%s Finished with returncode %d, Time Taken %s " , - thread_name, process.returncode, timestr) + timediff = timedelta(seconds=(eachJob["endtime"] - eachJob["starttime"])) + timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules else str(timediff) + logger.info( + "%s Finished with returncode %d, Time Taken %s ", + thread_name, + process.returncode, + timestr, + ) eachJob["finished"] = True no_of_finished_job = sum([not eachJ["finished"] for eachJ in job_list]) - logger.info("***** %d runs pending *****" , no_of_finished_job) + logger.info("***** %d runs pending *****", no_of_finished_job) def run_actions(job_list): thread_sema = threading.Semaphore(args.maxthreads) thread_list = [] for _, eachjob in enumerate(job_list): - t = threading.Thread(target=run_single_script, name=eachjob["name"], - args=(thread_sema, eachjob, job_list)) + t = threading.Thread( + target=run_single_script, name=eachjob["name"], args=(thread_sema, eachjob, job_list) + ) t.start() thread_list.append(t) for eachthread in thread_list: @@ -558,22 +591,19 @@ def collect_results(job_run_list): logger.info("No result files found for %s" % run["name"]) # Read and merge result file - vpr_res = ConfigParser(allow_no_value=True, - interpolation=ExtendedInterpolation()) - vpr_res.read_file( - open(os.path.join(run["run_dir"], "vpr_stat.result"))) + vpr_res = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolation()) + vpr_res.read_file(open(os.path.join(run["run_dir"], "vpr_stat.result"))) result = OrderedDict() result["name"] = run["name"] - result["TotalRunTime"] = int(run["endtime"]-run["starttime"]) + result["TotalRunTime"] = int(run["endtime"] - run["starttime"]) result.update(vpr_res["RESULTS"]) task_result.append(result) colnames = [] for eachLbl in task_result: colnames.extend(eachLbl.keys()) if len(task_result): - with open("task_result.csv", 'w', newline='') as csvfile: - writer = csv.DictWriter( - csvfile, extrasaction='ignore', fieldnames=list(colnames)) + with open("task_result.csv", "w", newline="") as csvfile: + writer = csv.DictWriter(csvfile, extrasaction="ignore", fieldnames=list(colnames)) writer.writeheader() for eachResult in task_result: writer.writerow(eachResult) diff --git a/openfpga_flow/scripts/run_modelsim.py b/openfpga_flow/scripts/run_modelsim.py index 4151ebd4b..3fca5d5ba 100644 --- a/openfpga_flow/scripts/run_modelsim.py +++ b/openfpga_flow/scripts/run_modelsim.py @@ -15,57 +15,67 @@ from configparser import ConfigParser, ExtendedInterpolation # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = # Configure logging system # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = -FILE_LOG_FORMAT = '%(levelname)s (%(threadName)10s) - %(message)s' -logging.basicConfig(level=logging.INFO, stream=sys.stdout, - format='%(levelname)s (%(threadName)10s) - %(message)s') -logger = logging.getLogger('Modelsim_run_log') +FILE_LOG_FORMAT = "%(levelname)s (%(threadName)10s) - %(message)s" +logging.basicConfig( + level=logging.INFO, stream=sys.stdout, format="%(levelname)s (%(threadName)10s) - %(message)s" +) +logger = logging.getLogger("Modelsim_run_log") # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = # Parse commandline arguments # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = parser = 
argparse.ArgumentParser()
-parser.add_argument('files', nargs='+',
-                    help="Pass SimulationDeckInfo generated by OpenFPGA flow" +
-                    " or pass taskname ")
-parser.add_argument('--maxthreads', type=int, default=2,
-                    help="Number of fpga_flow threads to run default = 2," +
-                    "Typically <= Number of processors on the system")
-parser.add_argument('--debug', action="store_true",
-                    help="Run script in debug mode")
-parser.add_argument('--modelsim_proc_tmpl', type=str,
-                    help="Modelsim proc template file")
-parser.add_argument('--modelsim_runsim_tmpl', type=str,
-                    help="Modelsim runsim template file")
-parser.add_argument('--run_sim', action="store_true",
-                    help="Execute generated script in formality")
-parser.add_argument('--modelsim_proj_name',
-                    help="Provide modelsim project name")
-parser.add_argument('--modelsim_ini', type=str,
-                    default="/uusoc/facility/cad_tools/Mentor/modelsim10.7b/modeltech/modelsim.ini",
-                    help="Skip any confirmation")
-parser.add_argument('--skip_prompt', action='store_true',
-                    help='Skip any confirmation')
-parser.add_argument('--ini_filename', type=str,
-                    default="simulation_deck_info.ini",
-                    help='default INI filename in in fun dir')
+parser.add_argument(
+    "files",
+    nargs="+",
+    help="Pass SimulationDeckInfo generated by OpenFPGA flow" + " or pass taskname ",
+)
+parser.add_argument(
+    "--maxthreads",
+    type=int,
+    default=2,
+    help="Number of fpga_flow threads to run, default = 2. "
+    + "Typically <= number of processors on the system",
+)
+parser.add_argument("--debug", action="store_true", help="Run script in debug mode")
+parser.add_argument("--modelsim_proc_tmpl", type=str, help="Modelsim proc template file")
+parser.add_argument("--modelsim_runsim_tmpl", type=str, help="Modelsim runsim template file")
+parser.add_argument("--run_sim", action="store_true", help="Execute generated script in ModelSim")
+parser.add_argument("--modelsim_proj_name", help="Provide modelsim project name")
+parser.add_argument(
+    "--modelsim_ini",
+    type=str,
+    default="/uusoc/facility/cad_tools/Mentor/modelsim10.7b/modeltech/modelsim.ini",
+    help="Path to the modelsim.ini file",
+)
+parser.add_argument("--skip_prompt", action="store_true", help="Skip any confirmation")
+parser.add_argument(
+    "--ini_filename",
+    type=str,
+    default="simulation_deck_info.ini",
+    help="Default INI filename in the run dir",
+)
 args = parser.parse_args()

 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 # Read script configuration file
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 task_script_dir = os.path.dirname(os.path.abspath(__file__))
-script_env_vars = ({"PATH": {
-    "OPENFPGA_FLOW_PATH": task_script_dir,
-    "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"),
-    "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"),
-    "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"),
-    "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
-    "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
-    "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir,
-                                                  os.pardir))}})
+script_env_vars = {
+    "PATH": {
+        "OPENFPGA_FLOW_PATH": task_script_dir,
+        "ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"),
+        "BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"),
+        "TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"),
+        "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
+        "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
+        "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, os.pardir)),
+    }
+}
 config = ConfigParser(interpolation=ExtendedInterpolation())
 config.read_dict(script_env_vars)
-config.read_file(open(os.path.join(task_script_dir, 'run_fpga_task.conf')))
+config.read_file(open(os.path.join(task_script_dir, "run_fpga_task.conf")))
 gc = config["GENERAL CONFIGURATION"]

 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
@@ -73,11 +83,11 @@ gc = config["GENERAL CONFIGURATION"]
 # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
 task_script_dir = os.path.dirname(os.path.abspath(__file__))
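+# The proc template below is copied into the project verbatim, while the runsim
+# template is a string.Template whose ${...} placeholders are filled from the
+# per-run simulation deck INI before ModelSim is invoked.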
"VerilogNetlists"), + "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, os.pardir)), + } +} config = ConfigParser(interpolation=ExtendedInterpolation()) config.read_dict(script_env_vars) -config.read_file(open(os.path.join(task_script_dir, 'run_fpga_task.conf'))) +config.read_file(open(os.path.join(task_script_dir, "run_fpga_task.conf"))) gc = config["GENERAL CONFIGURATION"] # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = @@ -73,11 +83,11 @@ gc = config["GENERAL CONFIGURATION"] # = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = = task_script_dir = os.path.dirname(os.path.abspath(__file__)) if not args.modelsim_proc_tmpl: - args.modelsim_proc_tmpl = os.path.join(task_script_dir, os.pardir, - "misc", "modelsim_proc.tcl") + args.modelsim_proc_tmpl = os.path.join(task_script_dir, os.pardir, "misc", "modelsim_proc.tcl") if not args.modelsim_runsim_tmpl: - args.modelsim_runsim_tmpl = os.path.join(task_script_dir, os.pardir, - "misc", "modelsim_runsim.tcl") + args.modelsim_runsim_tmpl = os.path.join( + task_script_dir, os.pardir, "misc", "modelsim_runsim.tcl" + ) args.modelsim_proc_tmpl = os.path.abspath(args.modelsim_proc_tmpl) args.modelsim_runsim_tmpl = os.path.abspath(args.modelsim_runsim_tmpl) @@ -101,10 +111,8 @@ def main(): clean_up_and_exit("Task run directory [%s] not found" % temp_dir) # = = = = = = = Create a current script log file handler = = = = - logfile_path = os.path.join(gc["task_dir"], - taskname, task_run, "modelsim_run.log") - resultfile_path = os.path.join(gc["task_dir"], - taskname, task_run, "modelsim_result.csv") + logfile_path = os.path.join(gc["task_dir"], taskname, task_run, "modelsim_run.log") + resultfile_path = os.path.join(gc["task_dir"], taskname, task_run, "modelsim_result.csv") logfilefh = logging.FileHandler(logfile_path, "w") logfilefh.setFormatter(logging.Formatter(FILE_LOG_FORMAT)) logger.addHandler(logfilefh) @@ -120,8 +128,9 @@ def main(): task_ini_files = [] for eachfile in logfiles: with open(eachfile) as fp: - run_dir = [re.findall(r'^INFO.*Run directory : (.*)$', line) - for line in open(eachfile)] + run_dir = [ + re.findall(r"^INFO.*Run directory : (.*)$", line) for line in open(eachfile) + ] run_dir = filter(bool, run_dir) for each_run in run_dir: INIfile = os.path.join(each_run[0], args.ini_filename) @@ -152,10 +161,10 @@ def create_tcl_script(files): # Resolve project Modelsim project path args.modelsim_run_dir = os.path.dirname(os.path.abspath(eachFile)) - modelsim_proj_dir = os.path.join( - args.modelsim_run_dir, "MMSIM2") - logger.info(f"Modelsim project dir not provide " + - f"using default {modelsim_proj_dir} directory") + modelsim_proj_dir = os.path.join(args.modelsim_run_dir, "MMSIM2") + logger.info( + f"Modelsim project dir not provide " + f"using default {modelsim_proj_dir} directory" + ) modelsim_proj_dir = os.path.abspath(modelsim_proj_dir) config["MODELSIM_PROJ_DIR"] = modelsim_proj_dir @@ -164,67 +173,68 @@ def create_tcl_script(files): # Resolve Modelsim Project name args.modelsim_proj_name = config["BENCHMARK"] + "_MMSIM" - logger.info(f"Modelsim project name not provide " + - f"using default {args.modelsim_proj_name} directory") + logger.info( + f"Modelsim project name not provide " + + f"using default {args.modelsim_proj_name} directory" + ) config["MODELSIM_PROJ_NAME"] = args.modelsim_proj_name config["MODELSIM_INI"] = args.modelsim_ini - config["VERILOG_PATH"] = os.path.join( - os.getcwd(), config["VERILOG_PATH"]) - IncludeFile = os.path.join( - 
@@ -164,67 +173,68 @@

             # Resolve Modelsim Project name
             args.modelsim_proj_name = config["BENCHMARK"] + "_MMSIM"
-            logger.info(f"Modelsim project name not provide " +
-                        f"using default {args.modelsim_proj_name} directory")
+            logger.info(
+                f"Modelsim project name not provided, "
+                + f"using default {args.modelsim_proj_name} directory"
+            )
             config["MODELSIM_PROJ_NAME"] = args.modelsim_proj_name
             config["MODELSIM_INI"] = args.modelsim_ini
-            config["VERILOG_PATH"] = os.path.join(
-                os.getcwd(), config["VERILOG_PATH"])
-            IncludeFile = os.path.join(
-                os.getcwd(),
-                config["VERILOG_PATH"],
-                config["VERILOG_FILE2"])
+            config["VERILOG_PATH"] = os.path.join(os.getcwd(), config["VERILOG_PATH"])
+            IncludeFile = os.path.join(os.getcwd(), config["VERILOG_PATH"], config["VERILOG_FILE2"])
             IncludeFileResolved = os.path.join(
                 os.getcwd(),
                 config["VERILOG_PATH"],
-                config["VERILOG_FILE2"].replace(".v", "_resolved.v"))
+                config["VERILOG_FILE2"].replace(".v", "_resolved.v"),
+            )
             with open(IncludeFileResolved, "w") as fpw:
                 with open(IncludeFile, "r") as fp:
                     for eachline in fp.readlines():
-                        eachline = eachline.replace("\"./", "\"../../../")
+                        eachline = eachline.replace('"./', '"../../../')
                         fpw.write(eachline)
             # Modify the variables in config file here
             config["TOP_TB"] = os.path.splitext(config["TOP_TB"])[0]

             # Write final template file
             # Write runsim file
-            tmpl = Template(open(args.modelsim_runsim_tmpl,
-                                 encoding='utf-8').read())
-            runsim_filename = os.path.join(modelsim_proj_dir,
-                                           "%s_runsim.tcl" % config['BENCHMARK'])
+            tmpl = Template(open(args.modelsim_runsim_tmpl, encoding="utf-8").read())
+            runsim_filename = os.path.join(modelsim_proj_dir, "%s_runsim.tcl" % config["BENCHMARK"])
             logger.info(f"Creating tcl script at : {runsim_filename}")
-            with open(runsim_filename, 'w', encoding='utf-8') as tclout:
+            with open(runsim_filename, "w", encoding="utf-8") as tclout:
                 tclout.write(tmpl.substitute(config))

             # Write proc file
-            proc_filename = os.path.join(modelsim_proj_dir,
-                                         "%s_autocheck_proc.tcl" % config['BENCHMARK'])
+            proc_filename = os.path.join(
+                modelsim_proj_dir, "%s_autocheck_proc.tcl" % config["BENCHMARK"]
+            )
             logger.info(f"Creating tcl script at : {proc_filename}")
-            with open(proc_filename, 'w', encoding='utf-8') as tclout:
-                tclout.write(open(args.modelsim_proc_tmpl,
-                                  encoding='utf-8').read())
-            runsim_files.append({
-                "ini_file": eachFile,
-                "modelsim_run_dir": args.modelsim_run_dir,
-                "runsim_filename": runsim_filename,
-                "run_complete": False,
-                "status": False,
-                "finished": True,
-                "starttime": 0,
-                "endtime": 0,
-                "Errors": 0,
-                "Warnings": 0
-            })
+            with open(proc_filename, "w", encoding="utf-8") as tclout:
+                tclout.write(open(args.modelsim_proc_tmpl, encoding="utf-8").read())
+            runsim_files.append(
+                {
+                    "ini_file": eachFile,
+                    "modelsim_run_dir": args.modelsim_run_dir,
+                    "runsim_filename": runsim_filename,
+                    "run_complete": False,
+                    "status": False,
+                    "finished": True,
+                    "starttime": 0,
+                    "endtime": 0,
+                    "Errors": 0,
+                    "Warnings": 0,
+                }
+            )

     # Execute modelsim
     if args.run_sim:
         thread_sema = threading.Semaphore(args.maxthreads)
         logger.info("Launching %d parallel threads" % args.maxthreads)
         thread_list = []
         for thread_no, eachjob in enumerate(runsim_files):
-            t = threading.Thread(target=run_modelsim_thread,
-                                 name=f"Thread_{thread_no:d}",
-                                 args=(thread_sema, eachjob, runsim_files))
+            t = threading.Thread(
+                target=run_modelsim_thread,
+                name=f"Thread_{thread_no:d}",
+                args=(thread_sema, eachjob, runsim_files),
+            )
             t.start()
             thread_list.append(t)
         for eachthread in thread_list:
@@ -235,6 +245,7 @@ def create_tcl_script(files):
         logger.info(f"runsim_filename {runsim_filename}")
         logger.info(f"proc_filename {proc_filename}")
         from pprint import pprint
+
         pprint(runsim_files)
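The runsim tcl generation above is plain string.Template substitution over the merged config mapping. A minimal sketch, with a one-line made-up template standing in for the real modelsim_runsim.tcl (whose contents and placeholder names live in openfpga_flow/misc):

```python
from string import Template

# Hypothetical stand-in for the contents of modelsim_runsim.tcl.
tmpl = Template("project new ${MODELSIM_PROJ_DIR} ${MODELSIM_PROJ_NAME}\n")

config = {
    "MODELSIM_PROJ_DIR": "/tmp/MMSIM2",   # invented values for the demo
    "MODELSIM_PROJ_NAME": "and2_MMSIM",
}
# substitute() raises KeyError if any $placeholder is unresolved, which is
# why the script fills in every config key before reaching this point.
print(tmpl.substitute(config), end="")  # -> project new /tmp/MMSIM2 and2_MMSIM
```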
open(logfile, "w+") as output: + output.write("* " * 20 + "\n") output.write("RunDirectory : %s\n" % os.getcwd()) command = ["vsim", "-c", "-do", eachJob["runsim_filename"]] - output.write(" ".join(command) + '\n') - output.write("* "*20 + '\n') + output.write(" ".join(command) + "\n") + output.write("* " * 20 + "\n") logger.info("Running modelsim with [%s]" % " ".join(command)) - process = subprocess.Popen(command, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - universal_newlines=True) + process = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + universal_newlines=True, + ) for line in process.stdout: if "Errors" in line: logger.info(line.strip()) - e, w = re.match( - "# .*: ([0-9].*), .*: ([0-9].*)", line).groups() + e, w = re.match("# .*: ([0-9].*), .*: ([0-9].*)", line).groups() eachJob["Errors"] += int(e) eachJob["Warnings"] += int(w) sys.stdout.buffer.flush() @@ -276,37 +287,35 @@ def run_modelsim_thread(s, eachJob, job_list): if not eachJob["Errors"]: eachJob["status"] = True except: - logger.exception("Failed to execute openfpga flow - " + - eachJob["name"]) + logger.exception("Failed to execute openfpga flow - " + eachJob["name"]) if not args.continue_on_fail: os._exit(1) eachJob["endtime"] = time.time() - timediff = timedelta(seconds=(eachJob["endtime"]-eachJob["starttime"])) - timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules \ - else str(timediff) + timediff = timedelta(seconds=(eachJob["endtime"] - eachJob["starttime"])) + timestr = humanize.naturaldelta(timediff) if "humanize" in sys.modules else str(timediff) eachJob["exectime"] = timestr - logger.info("%s Finished with returncode %d, Time Taken %s " % - (thread_name, process.returncode, timestr)) + logger.info( + "%s Finished with returncode %d, Time Taken %s " + % (thread_name, process.returncode, timestr) + ) eachJob["finished"] = True no_of_finished_job = sum([not eachJ["finished"] for eachJ in job_list]) logger.info("***** %d runs pending *****" % (no_of_finished_job)) def collect_result(result_file, result_obj): - colnames = ["status", "Errors", "Warnings", - "run_complete", "exectime", "finished", "logfile"] + colnames = ["status", "Errors", "Warnings", "run_complete", "exectime", "finished", "logfile"] if len(result_obj): - with open(result_file, 'w', newline='') as csvfile: - writer = csv.DictWriter( - csvfile, extrasaction='ignore', fieldnames=colnames) + with open(result_file, "w", newline="") as csvfile: + writer = csv.DictWriter(csvfile, extrasaction="ignore", fieldnames=colnames) writer.writeheader() for eachResult in result_obj: writer.writerow(eachResult) - logger.info("= = = ="*10) + logger.info("= = = =" * 10) passed_jobs = [each["status"] for each in result_obj] logger.info(f"Passed Jobs %d/%d", len(passed_jobs), len(result_obj)) logger.info(f"Result file stored at {result_file}") - logger.info("= = = ="*10) + logger.info("= = = =" * 10) if __name__ == "__main__":