# ED_LRR/tests/test_benchmark.py
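"""Benchmarks for the ED_LRR route planner.

The ``benchmark`` fixture is assumed to come from the pytest-benchmark plugin;
it times ``PyRouter.route`` across beam widths, greedyness values, and worker
counts.
"""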

import pytest
import os
from math import log2, ceil
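

# Resolve the given system names with the router and return the results in the
# same order as requested.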
def resolve_systems(r, *names):
    ret = []
    mapping = r.resolve_systems(*names)
    for name in names:
        ret.append(mapping.get(name))
    return ret
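

# Module-scoped fixture: load the star database once and resolve the benchmark
# systems to IDs, so every benchmark run starts from an already-loaded router.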
@pytest.fixture(scope="module")
def ed_lrr_router(stars_path):
    stars_path, systems = stars_path
    from ed_lrr_gui import PyRouter
    r = PyRouter(lambda status: None)
    r.load(stars_path)
    system_ids = resolve_systems(r, *systems)
    return r, system_ids
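

# Route parameters: beam width 0 (beam search disabled) and powers of two up to
# 2**16 at greedyness 0, plus a greedyness sweep with beam search disabled.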
argvalues = [(0, 0)]
argvalues += [(2 ** n, 0) for n in range(17)]
argvalues += [(0, g) for g in (0.25, 0.5, 0.75, 1)]
ids = []
for width, greedyness in argvalues:
    ids.append("beam_width:{}-greedyness:{}".format(width, greedyness))
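
# Worker counts: 0 (router default) plus 1, 2, 4, ... up to the smallest power
# of two that is at least os.cpu_count().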
n_workers = [0]
# os.cpu_count() can return None on some platforms; fall back to 1 in that case.
n_workers += [2 ** n for n in range(ceil(log2(os.cpu_count() or 1)) + 1)]
@pytest.mark.parametrize("workers", n_workers,
ids=lambda v: "workers:{}".format(v))
@pytest.mark.parametrize(argnames=("width", "greedyness"),
argvalues=argvalues, ids=ids)
@pytest.mark.parametrize("r_range", [48.0], ids=lambda v: "range:{}".format(v))
def test_benchmark(benchmark, ed_lrr_router,
                   r_range, workers, greedyness, width):
    # Benchmark a single route computation; the router and resolved system IDs
    # come pre-loaded from the module-scoped fixture above.
    r, system_ids = ed_lrr_router
    args = system_ids, r_range, greedyness, width, workers
    benchmark(r.route, *args)
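

# A typical invocation (assuming pytest-benchmark is installed) could be:
#   pytest ED_LRR/tests/test_benchmark.py --benchmark-only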