Skip to content

Commit

Permalink
Make openvino throughput optional (#239)
Browse files Browse the repository at this point in the history
Co-authored-by: Ashwin Vaidya <ashwinitinvaidya@gmail.com>
Co-authored-by: Samet Akcay <samet.akcay@intel.com>
  • Loading branch information
3 people committed Apr 20, 2022
1 parent a1d49f9 commit 8455303
Show file tree
Hide file tree
Showing 2 changed files with 17 additions and 5 deletions.
21 changes: 16 additions & 5 deletions tools/benchmarking/benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,21 +147,28 @@ def compute_on_cpu():
"""Compute all run configurations over a single CPU."""
sweep_config = OmegaConf.load("tools/benchmarking/benchmark_params.yaml")
for run_config in get_run_config(sweep_config.grid_search):
model_metrics = sweep(run_config, 0, sweep_config.seed)
model_metrics = sweep(run_config, 0, sweep_config.seed, False)
write_metrics(model_metrics, sweep_config.writer)


def compute_on_gpu(run_configs: Union[DictConfig, ListConfig], device: int, seed: int, writers: List[str]):
def compute_on_gpu(
run_configs: Union[DictConfig, ListConfig],
device: int,
seed: int,
writers: List[str],
compute_openvino: bool = False,
):
"""Go over each run config and collect the result.
Args:
run_configs (Union[DictConfig, ListConfig]): List of run configurations.
device (int): The GPU id used for running the sweep.
seed (int): Fix a seed.
writers (List[str]): Destinations to write to.
compute_openvino (bool, optional): Compute OpenVINO throughput. Defaults to False.
"""
for run_config in run_configs:
model_metrics = sweep(run_config, device, seed)
model_metrics = sweep(run_config, device, seed, compute_openvino)
write_metrics(model_metrics, writers)


Expand All @@ -183,6 +190,7 @@ def distribute_over_gpus():
device_id + 1,
sweep_config.seed,
sweep_config.writer,
sweep_config.compute_openvino,
)
)
for job in jobs:
Expand Down Expand Up @@ -220,11 +228,15 @@ def distribute():
upload_to_wandb(team="anomalib")


def sweep(run_config: Union[DictConfig, ListConfig], device: int = 0, seed: int = 42) -> Dict[str, Union[float, str]]:
def sweep(
run_config: Union[DictConfig, ListConfig], device: int = 0, seed: int = 42, convert_openvino: bool = False
) -> Dict[str, Union[float, str]]:
"""Go over all the values mentioned in `grid_search` parameter of the benchmarking config.
Args:
run_config (Union[DictConfig, ListConfig], optional): Configuration for current run.
device (int, optional): Name of the device on which the model is trained. Defaults to 0 "cpu".
convert_openvino (bool, optional): Whether to convert the model to openvino format. Defaults to False.
Returns:
Dict[str, Union[float, str]]: Dictionary containing the metrics gathered from the sweep.
Expand All @@ -245,7 +257,6 @@ def sweep(run_config: Union[DictConfig, ListConfig], device: int = 0, seed: int

# Set device in config. 0 - cpu, [0], [1].. - gpu id
model_config.trainer.gpus = 0 if device == 0 else [device - 1]
convert_openvino = bool(model_config.trainer.gpus)

if run_config.model_name in ["patchcore", "cflow"]:
convert_openvino = False # `torch.cdist` is not supported by onnx version 11
Expand Down
1 change: 1 addition & 0 deletions tools/benchmarking/benchmark_params.yaml
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
seed: 42
compute_openvino: false
hardware:
- cpu
- gpu
Expand Down

0 comments on commit 8455303

Please sign in to comment.