Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Make OpenVINO throughput optional in benchmarking #239

Merged
merged 2 commits into from
Apr 20, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
21 changes: 16 additions & 5 deletions tools/benchmarking/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,21 +147,28 @@ def compute_on_cpu():
"""Compute all run configurations over a single CPU."""
sweep_config = OmegaConf.load("tools/benchmarking/benchmark_params.yaml")
for run_config in get_run_config(sweep_config.grid_search):
model_metrics = sweep(run_config, 0, sweep_config.seed)
model_metrics = sweep(run_config, 0, sweep_config.seed, False)
write_metrics(model_metrics, sweep_config.writer)


def compute_on_gpu(run_configs: Union[DictConfig, ListConfig], device: int, seed: int, writers: List[str]):
def compute_on_gpu(
run_configs: Union[DictConfig, ListConfig],
device: int,
seed: int,
writers: List[str],
compute_openvino: bool = False,
):
"""Go over each run config and collect the result.

Args:
run_configs (Union[DictConfig, ListConfig]): List of run configurations.
device (int): The GPU id used for running the sweep.
seed (int): Fix a seed.
writers (List[str]): Destinations to write to.
compute_openvino (bool, optional): Compute OpenVINO throughput. Defaults to False.
"""
for run_config in run_configs:
model_metrics = sweep(run_config, device, seed)
model_metrics = sweep(run_config, device, seed, compute_openvino)
write_metrics(model_metrics, writers)


Expand All @@ -183,6 +190,7 @@ def distribute_over_gpus():
device_id + 1,
sweep_config.seed,
sweep_config.writer,
sweep_config.compute_openvino,
)
)
for job in jobs:
Expand Down Expand Up @@ -220,11 +228,15 @@ def distribute():
upload_to_wandb(team="anomalib")


def sweep(run_config: Union[DictConfig, ListConfig], device: int = 0, seed: int = 42) -> Dict[str, Union[float, str]]:
def sweep(
run_config: Union[DictConfig, ListConfig], device: int = 0, seed: int = 42, convert_openvino: bool = False
) -> Dict[str, Union[float, str]]:
"""Go over all the values mentioned in `grid_search` parameter of the benchmarking config.

Args:
run_config (Union[DictConfig, ListConfig]): Configuration for the current run.
device (int, optional): Id of the device on which the model is trained. Defaults to 0 (CPU).
convert_openvino (bool, optional): Whether to convert the model to openvino format. Defaults to False.

Returns:
Dict[str, Union[float, str]]: Dictionary containing the metrics gathered from the sweep.
Expand All @@ -245,7 +257,6 @@ def sweep(run_config: Union[DictConfig, ListConfig], device: int = 0, seed: int

# Set device in config. 0 - cpu, [0], [1].. - gpu id
model_config.trainer.gpus = 0 if device == 0 else [device - 1]
convert_openvino = bool(model_config.trainer.gpus)

if run_config.model_name in ["patchcore", "cflow"]:
convert_openvino = False # `torch.cdist` is not supported by onnx version 11
Expand Down
1 change: 1 addition & 0 deletions tools/benchmarking/benchmark_params.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
seed: 42
compute_openvino: false
hardware:
- cpu
- gpu
Expand Down