iprbench/pyproject.toml


[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
[project]
name = "iprbench"
dynamic = ["version"] # the list of fields whose values are discovered by the backend (e.g. __version__)
description = "benchmarks for IPR (Institut de Physique de Rennes) cluster"
readme = "README.md"
keywords = ["benchmark", "hpc", "parallel", "openmp"]
license = {text = "MIT License"}
dependencies = [
    "pandas",
    "matplotlib",
    "sqlalchemy",
    # "cocluto >= 1.7",
    "cocluto@git+https://git.ipr.univ-rennes.fr/cellinfo/cocluto@v1.7.0",
    # "starbench >= 1.0.4"
    "starbench@git+https://github.com/g-raffy/starbench@v1.0.4",
]
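# the pinned git direct references above stand in for the commented-out
# version ranges; pip resolves them at install time, assuming the machine
# has git access to both remotes:
#
#   pip install .     # regular install
#   pip install -e .  # editable install for development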
requires-python = ">= 3.8"
authors = [
    {name = "Guillaume Raffy", email = "guillaume.raffy@univ-rennes.fr"}
]
[project.scripts]
clusterbench-submit = "iprbench.clusterbench:main"
iprbench-run = "iprbench.main:main"
showresults = "iprbench.benchmarks.showresults:main"
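# once installed, each entry point above becomes a console command; a sketch
# of the expected usage (the exact flags depend on each module's main()):
#
#   clusterbench-submit ...   # submit a benchmark as cluster jobs
#   iprbench-run ...          # run a benchmark directly
#   showresults ...           # display collected benchmark results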
[project.urls]
Repository = "https://github.com/g-raffy/starbench"
[tool.setuptools]
packages = ["iprbench", "iprbench.benchmarks", "iprbench.resultsdb"]
[tool.setuptools.dynamic]
version = {attr = "iprbench.version.__version__"}
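# the version string is read from the attribute declared above; this assumes
# a module such as iprbench/version.py containing (hypothetical sketch):
#
#   __version__ = '1.0.0'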
[tool.setuptools.package-data]
iprbench = ["resources/**/*"]
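# the glob above bundles every file under iprbench/resources/ into the
# distribution; a hypothetical layout it would pick up:
#
#   iprbench/resources/hibench/...
#   iprbench/resources/mamul1/...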