Merge pull request #1052 from lnis-uofu/refactor_task_py

Refactor run_fpga_task.py and run_fpga_flow.py
This commit is contained in:
tangxifan 2023-02-11 18:18:50 -08:00 committed by GitHub
commit 64b20c1942
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 49 additions and 21 deletions

View File

@@ -395,7 +395,7 @@ def main():
def check_required_file(): def check_required_file():
""" Function ensure existace of all required files for the script """ """Function ensure existace of all required files for the script"""
files_dict = { files_dict = {
"CAD TOOL PATH": os.path.join( "CAD TOOL PATH": os.path.join(
flow_script_dir, os.pardir, "misc", "fpgaflow_default_tool_path.conf" flow_script_dir, os.pardir, "misc", "fpgaflow_default_tool_path.conf"
@@ -407,7 +407,7 @@ def check_required_file():
def read_script_config(): def read_script_config():
""" This fucntion reads default CAD tools path from configuration file """ """This fucntion reads default CAD tools path from configuration file"""
global config, cad_tools global config, cad_tools
config = ConfigParser(interpolation=ExtendedInterpolation()) config = ConfigParser(interpolation=ExtendedInterpolation())
config.read_dict(script_env_vars) config.read_dict(script_env_vars)

View File

@@ -67,7 +67,9 @@ parser.add_argument(
) )
parser.add_argument("--config", help="Override default configuration") parser.add_argument("--config", help="Override default configuration")
parser.add_argument( parser.add_argument(
"--test_run", action="store_true", help="Dummy run shows final generated VPR commands" "--test_run",
action="store_true",
help="Dummy run shows final generated VPR commands",
) )
parser.add_argument("--debug", action="store_true", help="Run script in debug mode") parser.add_argument("--debug", action="store_true", help="Run script in debug mode")
parser.add_argument("--continue_on_fail", action="store_true", help="Exit script with return code") parser.add_argument("--continue_on_fail", action="store_true", help="Exit script with return code")
@@ -83,10 +85,13 @@ task_script_dir = os.path.dirname(os.path.abspath(__file__))
script_env_vars = { script_env_vars = {
"PATH": { "PATH": {
"OPENFPGA_FLOW_PATH": task_script_dir, "OPENFPGA_FLOW_PATH": task_script_dir,
"ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "arch"), "VPR_ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "openfpga_flow", "vpr_arch"),
"OPENFPGA_SHELLSCRIPT_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "OpenFPGAShellScripts"), "OF_ARCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "openfpga_flow", "openfpga_arch"),
"BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "benchmarks"), "OPENFPGA_SHELLSCRIPT_PATH": os.path.join(
"TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "tech"), "${PATH:OPENFPGA_PATH}", "openfpga_flow", "OpenFPGAShellScripts"
),
"BENCH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "openfpga_flow", "benchmarks"),
"TECH_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "openfpga_flow", "tech"),
"SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"), "SPICENETLIST_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "SpiceNetlists"),
"VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"), "VERILOG_PATH": os.path.join("${PATH:OPENFPGA_PATH}", "VerilogNetlists"),
"OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, os.pardir)), "OPENFPGA_PATH": os.path.abspath(os.path.join(task_script_dir, os.pardir, os.pardir)),
@@ -381,8 +386,8 @@ def generate_each_task_actions(taskname):
# architecture, benchmark and parameters # architecture, benchmark and parameters
# Create run_job object [arch, bench, run_dir, commnad] # Create run_job object [arch, bench, run_dir, commnad]
flow_run_cmd_list = [] flow_run_cmd_list = []
for indx, arch in enumerate(archfile_list):
for bench in benchmark_list: for bench in benchmark_list:
for indx, arch in enumerate(archfile_list):
for lbl, param in bench["script_params"].items(): for lbl, param in bench["script_params"].items():
if benchmark_top_module_count.count(bench["top_module"]) > 1: if benchmark_top_module_count.count(bench["top_module"]) > 1:
flow_run_dir = get_flow_rundir( flow_run_dir = get_flow_rundir(
@@ -400,6 +405,7 @@ def generate_each_task_actions(taskname):
param=param, param=param,
task_conf=task_conf, task_conf=task_conf,
) )
command += ["--flow_config", curr_task_conf_file]
flow_run_cmd_list.append( flow_run_cmd_list.append(
{ {
"arch": arch, "arch": arch,
@@ -506,13 +512,21 @@ def create_run_command(curr_job_dir, archfile, benchmark_obj, param, task_conf):
if args.debug: if args.debug:
command += ["--debug"] command += ["--debug"]
return command return command
def strip_child_logger_info(line): def strip_child_logger_info(line):
try: try:
logtype, message = line.split(" - ", 1) logtype, message = line.split(" - ", 1)
lognumb = {"CRITICAL": 50, "ERROR": 40, "WARNING": 30, "INFO": 20, "DEBUG": 10, "NOTSET": 0} lognumb = {
"CRITICAL": 50,
"ERROR": 40,
"WARNING": 30,
"INFO": 20,
"DEBUG": 10,
"NOTSET": 0,
}
logger.log(lognumb[logtype.strip().upper()], message) logger.log(lognumb[logtype.strip().upper()], message)
except: except:
logger.info(line) logger.info(line)
@@ -572,7 +586,9 @@ def run_actions(job_list):
thread_list = [] thread_list = []
for _, eachjob in enumerate(job_list): for _, eachjob in enumerate(job_list):
t = threading.Thread( t = threading.Thread(
target=run_single_script, name=eachjob["name"], args=(thread_sema, eachjob, job_list) target=run_single_script,
name=eachjob["name"],
args=(thread_sema, eachjob, job_list),
) )
t.start() t.start()
thread_list.append(t) thread_list.append(t)
@@ -581,6 +597,9 @@ def run_actions(job_list):
def collect_results(job_run_list): def collect_results(job_run_list):
"""
Collect performance numbers from vpr_stat.result file
"""
task_result = [] task_result = []
for run in job_run_list: for run in job_run_list:
if not run["status"]: if not run["status"]:
@@ -588,25 +607,34 @@ def collect_results(job_run_list):
continue continue
# Check if any result file exist # Check if any result file exist
if not glob.glob(os.path.join(run["run_dir"], "*.result")): if not glob.glob(os.path.join(run["run_dir"], "*.result")):
logger.info("No result files found for %s" % run["name"]) logger.info("No result files found for %s", run["name"])
continue
# Read and merge result file # Read and merge result file
vpr_res = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolation()) vpr_res = ConfigParser(allow_no_value=True, interpolation=ExtendedInterpolation())
vpr_res.read_file(open(os.path.join(run["run_dir"], "vpr_stat.result"))) vpr_result_file = os.path.join(run["run_dir"], "vpr_stat.result")
vpr_res.read_file(open(vpr_result_file, encoding="UTF-8"))
result = OrderedDict() result = OrderedDict()
result["name"] = run["name"] result["name"] = run["name"]
result["TotalRunTime"] = int(run["endtime"] - run["starttime"]) result["TotalRunTime"] = int(run["endtime"] - run["starttime"])
result.update(vpr_res["RESULTS"]) result.update(vpr_res["RESULTS"])
task_result.append(result) task_result.append(result)
colnames = [] colnames = []
for eachLbl in task_result: # Extract all column names
colnames.extend(eachLbl.keys()) for each_metric in task_result:
if len(task_result): colnames.extend(set(each_metric.keys()) - {"name", "TotalRunTime"})
with open("task_result.csv", "w", newline="") as csvfile: colnames = sorted(list(set(colnames)))
writer = csv.DictWriter(csvfile, extrasaction="ignore", fieldnames=list(colnames)) if len(task_result) > 0:
with open("task_result.csv", "w", encoding="UTF-8", newline="") as csvfile:
writer = csv.DictWriter(
csvfile,
extrasaction="ignore",
fieldnames=["name", "TotalRunTime"] + colnames,
)
writer.writeheader() writer.writeheader()
for eachResult in task_result: for each in task_result:
writer.writerow(eachResult) writer.writerow(each)
if __name__ == "__main__": if __name__ == "__main__":