__version__ = '0.0.1'

import argparse
import json
import logging
from pathlib import Path

from .core import BenchmarkId, IBenchmark, ResultsDbFactory
from .benchmarks.hibench import HiBench
from .benchmarks.mamul1 import MaMul1
from .resultsdb.tsvresultsdb import TsvResultsDbCreator
from .resultsdb.sqlresultsdb import SqliteResultsDbCreator, SqlServerResultsDbCreator
from .util import Singleton
from .autoparams import MeasurementTime, HostFqdn, NumCpus, CpuModel, IprBenchVersion, HostId


class BenchmarkFactory(metaclass=Singleton):
    """creates the IBenchmark instance matching a given benchmark id"""

    def __init__(self):
        pass

    def create_benchmark(self, bench_id: BenchmarkId) -> IBenchmark:
        # bench_id selects one of the known benchmark implementations
        benchmark = {
            'hibench': HiBench(),
            'mamul1': MaMul1()
        }[bench_id]
        return benchmark


def main():
    """executes a benchmark in a cluster job environment and stores its measurements in a results database"""
    logging.basicConfig(level=logging.DEBUG)

    example_text = '''example:

%(prog)s --benchmark-id 'mamul1' --config '{"compiler_id": "gfortran", "matrix_size": 1024, "num_loops":10, "num_cores":2}' --results-dir /tmp/mamul1_out --resultsdb-params '{"type": "tsv-files", "tsv_results_dir": "/tmp/toto"}'

'''

    arg_parser = argparse.ArgumentParser(description='executes a benchmark in a cluster job environment', epilog=example_text, formatter_class=argparse.RawDescriptionHelpFormatter)
    arg_parser.add_argument('--benchmark-id', type=BenchmarkId, required=True, help='the id of the benchmark to perform (eg mamul1)')
    arg_parser.add_argument('--results-dir', type=Path, required=True, help='the root directory of the tree where the results of the benchmarks are stored (eg $GLOBAL_WORK_DIR/graffy/benchmarks/hibench)')
    arg_parser.add_argument('--config', type=str, required=True, help='the benchmark configuration in json format, eg {"compiler_id": "gfortran", "matrix_size": 1024}')
    arg_parser.add_argument('--resultsdb-params', type=str, required=True, help='the resultsdb configuration in json format, eg {"type": "tsv-files", "tsv_results_dir": "/tmp/toto"}')

    args = arg_parser.parse_args()

    benchmark_id = BenchmarkId(args.benchmark_id)
    benchmark = BenchmarkFactory().create_benchmark(benchmark_id)

    benchmark_config = json.loads(args.config)
    benchmark.validate_config(benchmark_config)
    results_dir = args.results_dir

    # register all supported resultsdb backends, then instantiate the one requested by the user
    ResultsDbFactory().register_resultsdb_creator(TsvResultsDbCreator())
    ResultsDbFactory().register_resultsdb_creator(SqliteResultsDbCreator())
    ResultsDbFactory().register_resultsdb_creator(SqlServerResultsDbCreator())

    resultsdb_params = json.loads(args.resultsdb_params)
    results_db = ResultsDbFactory().create_resultsdb(resultsdb_params['type'], resultsdb_params)

    # these parameters are measured automatically and attached to every result row
    results_db.add_auto_param(MeasurementTime())
    results_db.add_auto_param(IprBenchVersion())
    results_db.add_auto_param(HostId())
    results_db.add_auto_param(HostFqdn())
    results_db.add_auto_param(NumCpus())
    results_db.add_auto_param(CpuModel())

    results_table = results_db.get_table(benchmark)

    measurements = benchmark.execute(benchmark_config, results_dir)
    results_table.add_results(benchmark_config, measurements)

    # out_params.append(BenchParam('host_id', BenchParam.Type.PARAM_TYPE_STRING, 'the id of the host running the benchmark'))
    # benchmark.get_measurements(results_dir)
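

# minimal entry-point guard, added as a sketch: it assumes this module may
# also be run directly (eg `python -m iprbench.main`, where the module path is
# hypothetical); if main() is only wired up as a console_scripts entry point,
# the guard is simply never exercised.
if __name__ == '__main__':
    main()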