Commit: Refactor

Che-Yu Wu committed May 23, 2023
1 parent 6517313 commit 70d345f
Showing 8 changed files with 123 additions and 87 deletions.
24 changes: 16 additions & 8 deletions CMakeLists.txt
@@ -662,29 +662,37 @@ endif()
# dependencies to them.
#-------------------------------------------------------------------------------

if(IREE_BUILD_LEGACY_BENCHMARKS OR IREE_BUILD_E2E_TEST_ARTIFACTS)
# TODO(#11263): Remove together with IREE_BUILD_LEGACY_BENCHMARKS.
if(IREE_BUILD_LEGACY_BENCHMARKS)
# Add top-level custom targets to drive generating benchmark suites.

# iree-benchmark-import-models imports benchmark models from their source
# formats, such as .tflite flatbuffers, to IREE-compatible .mlir files.
add_custom_target(iree-benchmark-import-models)

# iree-benchmark-suites* fully prepares benchmark models for benchmarking:
# iree-benchmark-suites fully prepares benchmark models for benchmarking:
# * importing from source formats to IREE-compatible .mlir files
# * compiling from .mlir files to benchmark-ready .vmfb files
# * generating flagfiles for executing the benchmark .vmfb files
# Build default benchmark suites.
add_custom_target(iree-benchmark-suites)
# Build long-running benchmark suites with larger models.
add_custom_target(iree-benchmark-suites-long)
endif()

# TODO(#11263): This conditional block should be merged with the block above
# once we remove IREE_BUILD_LEGACY_BENCHMARKS.
if(IREE_BUILD_E2E_TEST_ARTIFACTS)
# iree-benchmark-import-models imports benchmark models from their source
# formats, such as .tflite flatbuffers, to IREE-compatible .mlir files.
add_custom_target(iree-benchmark-import-models)

# iree-benchmark-suites* fully prepares benchmark models for benchmarking:
# * importing from source formats to IREE-compatible .mlir files
# * compiling from .mlir files to benchmark-ready .vmfb files
# Build default benchmark suites.
add_custom_target(iree-benchmark-suites)
# Build long-running benchmark suites.
add_custom_target(iree-benchmark-suites-long)

# iree-e2e-compile-stats-suites* compiles the benchmark models with specific
# flags to collect the compilation statistics.
# Build for defualt benchmark suites.
# Build for default benchmark suites.
add_custom_target(iree-e2e-compile-stats-suites)
# Build for long-running benchmark suites.
add_custom_target(iree-e2e-compile-stats-suites-long)
14 changes: 8 additions & 6 deletions build_tools/benchmarks/export_benchmark_config.py
@@ -39,7 +39,7 @@
import json
import textwrap

from benchmark_suites.iree import benchmark_collections
from benchmark_suites.iree import benchmark_collections, benchmark_tags
from e2e_test_artifacts import iree_artifacts
from e2e_test_framework import serialization
from e2e_test_framework.definitions import common_definitions, iree_definitions
@@ -50,12 +50,14 @@
"x86_64":
lambda config: config.target_device_spec.architecture.architecture ==
"x86_64",
"cuda": (lambda config: "cuda" in config.tags and "long-running" not in
config.tags),
"cuda":
lambda config: (benchmark_tags.CUDA in config.tags and benchmark_tags.
LONG_RUNNING not in config.tags),
"cuda-long":
lambda config: "cuda" in config.tags and "long-running" in config.tags,
lambda config: (benchmark_tags.CUDA in config.tags and benchmark_tags.
LONG_RUNNING in config.tags),
"vulkan-nvidia":
lambda config: "vulkan-nvidia" in config.tags,
lambda config: benchmark_tags.VULKAN_NVIDIA in config.tags,
"android-cpu":
lambda config:
(config.target_device_spec.architecture.type == common_definitions.
@@ -148,7 +150,7 @@ def _export_compilation_handler(_args: argparse.Namespace):
all_gen_configs, _ = benchmark_collections.generate_benchmarks()
compile_stats_gen_configs = [
config for config in all_gen_configs
if benchmark_collections.COMPILE_STATS_TAG in config.compile_config.tags
if benchmark_tags.COMPILE_STATS in config.compile_config.tags
]

distinct_module_dir_paths = _get_distinct_module_dir_paths(
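The preset matchers above map a preset name such as "cuda" or "cuda-long" to a predicate over run configs, and after this change they test membership of the shared benchmark_tags constants instead of bare strings. Below is a minimal, self-contained sketch of how such a name-to-predicate table can be applied to pick configs for export; the RunConfig stand-in, the PRESET_MATCHERS name, and the select_configs helper are illustrative only, not the script's actual API.

from dataclasses import dataclass, field
from typing import Callable, Dict, List, Sequence

# Illustrative stand-ins for the shared tag constants in benchmark_tags.py.
CUDA = "cuda"
LONG_RUNNING = "long-running"

@dataclass(frozen=True)
class RunConfig:
    # Simplified stand-in for the framework's E2EModelRunConfig.
    name: str
    tags: Sequence[str] = field(default_factory=tuple)

# Preset name -> predicate, mirroring the shape of the matcher table above.
PRESET_MATCHERS: Dict[str, Callable[[RunConfig], bool]] = {
    "cuda": lambda c: CUDA in c.tags and LONG_RUNNING not in c.tags,
    "cuda-long": lambda c: CUDA in c.tags and LONG_RUNNING in c.tags,
}

def select_configs(preset: str, configs: Sequence[RunConfig]) -> List[RunConfig]:
    # Keep only the run configs the named preset matches.
    matcher = PRESET_MATCHERS[preset]
    return [c for c in configs if matcher(c)]

configs = [
    RunConfig("resnet50-cuda", tags=(CUDA,)),
    RunConfig("bert-large-cuda-long", tags=(CUDA, LONG_RUNNING)),
]
print([c.name for c in select_configs("cuda", configs)])       # ['resnet50-cuda']
print([c.name for c in select_configs("cuda-long", configs)])  # ['bert-large-cuda-long']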
build_tools/python/benchmark_suites/iree/benchmark_collections.py
@@ -9,15 +9,13 @@

from e2e_test_artifacts import iree_artifacts
from e2e_test_framework.definitions import iree_definitions
from benchmark_suites.iree import (riscv_benchmarks, x86_64_benchmarks,
adreno_benchmarks, armv8_a_benchmarks,
cuda_benchmarks, mali_benchmarks,
vulkan_nvidia_benchmarks, vmvx_benchmarks)
from benchmark_suites.iree import (benchmark_tags, riscv_benchmarks,
x86_64_benchmarks, adreno_benchmarks,
armv8_a_benchmarks, cuda_benchmarks,
mali_benchmarks, vulkan_nvidia_benchmarks,
vmvx_benchmarks)

COMPILE_STATS_ID_SUFFIX = "-compile-stats"
# Tag that indicates this compile config is generated for collecting compilation
# statistics.
COMPILE_STATS_TAG = "compile-stats"


def generate_benchmarks(
@@ -49,7 +47,7 @@ def generate_benchmarks(
scheduling_stats_path = f"{iree_definitions.MODULE_DIR_VARIABLE}/{iree_artifacts.SCHEDULING_STATS_FILENAME}"
compile_stats_config = iree_definitions.CompileConfig.build(
id=compile_config.id + COMPILE_STATS_ID_SUFFIX,
tags=compile_config.tags + [COMPILE_STATS_TAG],
tags=compile_config.tags + [benchmark_tags.COMPILE_STATS],
compile_targets=compile_config.compile_targets,
extra_flags=compile_config.extra_flags + [
# Enable zip polyglot to provide component sizes.
@@ -63,7 +61,8 @@
compile_stats_gen_configs.append(
iree_definitions.ModuleGenerationConfig.build(
imported_model=gen_config.imported_model,
compile_config=compile_stats_config))
compile_config=compile_stats_config,
tags=gen_config.tags))
all_gen_configs += compile_stats_gen_configs

return (all_gen_configs, all_run_configs)
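The loop above derives a statistics-collection variant of every compile config by appending COMPILE_STATS_ID_SUFFIX to its id and the shared benchmark_tags.COMPILE_STATS tag to its tags, and, new in this change, forwards the generation config's own tags so the downstream CMake generator can route the module. A rough sketch of that derivation pattern follows; it uses simplified stand-in dataclasses rather than the real iree_definitions types, and the helper name is ours, not the module's.

from dataclasses import dataclass
from typing import Tuple

COMPILE_STATS = "compile-stats"            # mirrors benchmark_tags.COMPILE_STATS
COMPILE_STATS_ID_SUFFIX = "-compile-stats"

@dataclass(frozen=True)
class CompileConfig:
    # Simplified stand-in for iree_definitions.CompileConfig.
    id: str
    tags: Tuple[str, ...]
    extra_flags: Tuple[str, ...] = ()

@dataclass(frozen=True)
class ModuleGenerationConfig:
    # Simplified stand-in for iree_definitions.ModuleGenerationConfig.
    imported_model: str
    compile_config: CompileConfig
    tags: Tuple[str, ...] = ()

def derive_compile_stats_config(gen_config: ModuleGenerationConfig) -> ModuleGenerationConfig:
    # Build the "-compile-stats" sibling of a generation config: same model,
    # same tags, but a compile config marked (id suffix + tag) for stats collection.
    cc = gen_config.compile_config
    stats_cc = CompileConfig(
        id=cc.id + COMPILE_STATS_ID_SUFFIX,
        tags=cc.tags + (COMPILE_STATS,),
        # The real code also appends extra flags here (e.g. zip polyglot output).
        extra_flags=cc.extra_flags,
    )
    return ModuleGenerationConfig(imported_model=gen_config.imported_model,
                                  compile_config=stats_cc,
                                  tags=gen_config.tags)

base = ModuleGenerationConfig(
    imported_model="mobilenet_v2",
    compile_config=CompileConfig(id="x86_64-default", tags=("default-flags",)),
    tags=("cuda",))
stats = derive_compile_stats_config(base)
assert stats.compile_config.id == "x86_64-default-compile-stats"
assert COMPILE_STATS in stats.compile_config.tags and stats.tags == base.tags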
18 changes: 18 additions & 0 deletions build_tools/python/benchmark_suites/iree/benchmark_tags.py
@@ -0,0 +1,18 @@
## Copyright 2023 The IREE Authors
#
# Licensed under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

# Tag that indicates this compile config is generated for collecting compilation
# statistics.
COMPILE_STATS = "compile-stats"

# Tag for long-running benchmarks.
LONG_RUNNING = "long-running"

# Tag for CUDA benchmarks.
CUDA = "cuda"

# Tag for Vulkan NVIDIA benchmarks.
VULKAN_NVIDIA = "vulkan-nvidia"
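Collecting these strings in one module lets the other files in this change compare against shared constants instead of repeating literals like "cuda" or "long-running". A small before/after illustration, with a local stand-in for a tagged config:

# Local stand-ins; the real constants live in benchmark_suites/iree/benchmark_tags.py.
CUDA = "cuda"
LONG_RUNNING = "long-running"

class Config:
    # Minimal stand-in for a framework config carrying a list of tags.
    def __init__(self, tags):
        self.tags = tags

config = Config(tags=[CUDA, LONG_RUNNING])

# Before: bare string literals scattered across modules, easy to misspell.
before = "cuda" in config.tags and "long-running" in config.tags

# After: every module references the constants defined once.
after = CUDA in config.tags and LONG_RUNNING in config.tags

assert before == after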
17 changes: 9 additions & 8 deletions build_tools/python/benchmark_suites/iree/cuda_benchmarks.py
@@ -6,7 +6,7 @@
"""Defines IREE CUDA benchmarks."""

from typing import List, Tuple, Sequence
from benchmark_suites.iree import module_execution_configs
from benchmark_suites.iree import benchmark_tags, module_execution_configs
from e2e_test_framework import unique_ids
from e2e_test_framework.definitions import common_definitions, iree_definitions
from e2e_test_framework.device_specs import device_collections
@@ -71,24 +71,25 @@ def generate(
) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
List[iree_definitions.E2EModelRunConfig]]:
"""Generates IREE compile and run configs."""
# The `cuda` tag is required to put them into the CUDA benchmark preset.
gen_configs, run_configs = self._generate_configs(model_groups.CUDA_MODELS,
self.SM_80_COMPILE_CONFIG,
tags=["cuda"])
# The CUDA tag is required to put them into the CUDA benchmark preset.
gen_configs, run_configs = self._generate_configs(
model_groups.CUDA_MODELS,
self.SM_80_COMPILE_CONFIG,
tags=[benchmark_tags.CUDA])
ubench_gen_configs, ubench_run_configs = self._generate_configs(
model_groups.MICRO_MATMUL,
self.SM_80_UBENCH_MATMUL_COMPILE_CONFIG,
execution_config=module_execution_configs.CUDA_BATCH_SIZE_100_CONFIG,
tags=["cuda"])
tags=[benchmark_tags.CUDA])
ubench_splitk_gen_configs, ubench_splitk_run_configs = self._generate_configs(
model_groups.MICRO_MATMUL_SPLITK,
self.SM_80_UBENCH_MATMUL_SPLITK_COMPILE_CONFIG,
execution_config=module_execution_configs.CUDA_BATCH_SIZE_100_CONFIG,
tags=["cuda"])
tags=[benchmark_tags.CUDA])
long_running_gen_configs, long_running_module_configs = self._generate_configs(
model_groups.CUDA_MODELS_LONG,
self.SM_80_COMPILE_CONFIG,
tags=["cuda", "long-running"])
tags=[benchmark_tags.CUDA, benchmark_tags.LONG_RUNNING])
return (gen_configs + ubench_gen_configs + ubench_splitk_gen_configs +
long_running_gen_configs, run_configs + ubench_run_configs +
ubench_splitk_run_configs + long_running_module_configs)
build_tools/python/benchmark_suites/iree/vulkan_nvidia_benchmarks.py
@@ -6,7 +6,7 @@
"""Defines IREE Vulkan NVIDIA benchmarks."""

from typing import List, Tuple, Sequence
from benchmark_suites.iree import module_execution_configs
from benchmark_suites.iree import benchmark_tags, module_execution_configs
from e2e_test_framework import unique_ids
from e2e_test_framework.definitions import common_definitions, iree_definitions
from e2e_test_framework.device_specs import device_collections
@@ -58,14 +58,14 @@ def _generate_configs(
compile_config: iree_definitions.CompileConfig,
execution_config: iree_definitions.
ModuleExecutionConfig = module_execution_configs.VULKAN_CONFIG,
run_tags: Sequence[str] = [],
tags: Sequence[str] = [],
) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
List[iree_definitions.E2EModelRunConfig]]:
gen_configs = [
iree_definitions.ModuleGenerationConfig.build(
compile_config=compile_config,
imported_model=iree_definitions.ImportedModel.from_model(model))
for model in models
imported_model=iree_definitions.ImportedModel.from_model(model),
tags=tags) for model in models
]
# We use the same NVIDIA Ampere GPU for benchmarking code generated for
# both Pascal and Ampere architectures. What we care is not exactly these
@@ -79,7 +79,7 @@
module_generation_configs=gen_configs,
module_execution_configs=[execution_config],
device_specs=ampere_devices,
tags=run_tags)
tags=tags)

return (gen_configs, run_module_configs)

@@ -88,15 +88,15 @@ def generate(
) -> Tuple[List[iree_definitions.ModuleGenerationConfig],
List[iree_definitions.E2EModelRunConfig]]:
"""Generates IREE compile and run configs."""
# The `vulkan-nvidia` tag is required to put them into the Vulkan NVIDIA
# The `vulkan-nvidia`` tag is required to put them into the Vulkan NVIDIA
# benchmark preset.
tensorcore_gen_configs, tensorcore_run_configs = self._generate_configs(
model_groups.VULKAN_MODELS,
self.TENSORCORE_COMPILE_CONFIG,
run_tags=["vulkan-nvidia"])
tags=[benchmark_tags.VULKAN_NVIDIA])
simt_gen_configs, simt_run_configs = self._generate_configs(
model_groups.VULKAN_MODELS,
self.SIMT_COMPILE_CONFIG,
run_tags=["vulkan-nvidia"])
tags=[benchmark_tags.VULKAN_NVIDIA])
return (tensorcore_gen_configs + simt_gen_configs,
tensorcore_run_configs + simt_run_configs)
build_tools/python/e2e_test_artifacts/cmake_generator/iree_rule_generator.py
@@ -5,12 +5,12 @@
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
"""Generates CMake rules to build IREE artifacts."""

from dataclasses import dataclass
import collections
from dataclasses import dataclass
from typing import Dict, List, Sequence
import pathlib

from benchmark_suites.iree import benchmark_collections
from benchmark_suites.iree import benchmark_tags
from e2e_test_artifacts import iree_artifacts
from e2e_test_artifacts.cmake_generator import model_rule_generator
from e2e_test_framework.definitions import iree_definitions
@@ -21,10 +21,10 @@
BENCHMARK_SUITES_CMAKE_TARGET = "iree-benchmark-suites"
# Compilation statistics suites for default benchmarks.
E2E_COMPILE_STATS_SUITES = "iree-e2e-compile-stats-suites"
# Long-running benchmark suites with larger models.
# Long-running benchmark suites.
LONG_BENCHMARK_SUITES_CMAKE_TARGET = "iree-benchmark-suites-long"
# Compilation statistics suites for long-running benchmarks.
LONG_E2E_COMPILE_STATS_SUITES = "iree-e2e-compile-stats-suites-long"
LONG_E2E_COMPILE_STATS_SUITES_CMAKE_TARGET = "iree-e2e-compile-stats-suites-long"


@dataclass(frozen=True)
Expand Down Expand Up @@ -177,14 +177,19 @@ def generate_rules(
module_generation_config=gen_config,
output_file_path=module_dir_path / iree_artifacts.MODULE_FILENAME)

has_compile_stats_tag = (benchmark_collections.COMPILE_STATS_TAG
in gen_config.compile_config.tags)
if "long-running" in gen_config.tags:
suite_target = (LONG_E2E_COMPILE_STATS_SUITES if has_compile_stats_tag
else LONG_BENCHMARK_SUITES_CMAKE_TARGET)
is_compile_stats = (benchmark_tags.COMPILE_STATS
in gen_config.compile_config.tags)
if benchmark_tags.LONG_RUNNING in gen_config.tags:
if is_compile_stats:
suite_target = LONG_E2E_COMPILE_STATS_SUITES_CMAKE_TARGET
else:
suite_target = LONG_BENCHMARK_SUITES_CMAKE_TARGET
else:
suite_target = (E2E_COMPILE_STATS_SUITES if has_compile_stats_tag else
BENCHMARK_SUITES_CMAKE_TARGET)
if is_compile_stats:
suite_target = E2E_COMPILE_STATS_SUITES
else:
suite_target = BENCHMARK_SUITES_CMAKE_TARGET

suite_target_names[suite_target].append(module_compile_rule.target_name)

cmake_rules.extend(module_compile_rule.cmake_rules)
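The branch above routes each compiled module to one of the four CMake suite targets: LONG_RUNNING on the generation config selects the "-long" variants, and COMPILE_STATS on its compile config selects the compile-stats variants. A condensed sketch of that decision follows; the pick_suite_target helper is illustrative, while the real code assigns suite_target inline as shown in the hunk.

# Suite target names, as defined near the top of the generator.
BENCHMARK_SUITES_CMAKE_TARGET = "iree-benchmark-suites"
E2E_COMPILE_STATS_SUITES = "iree-e2e-compile-stats-suites"
LONG_BENCHMARK_SUITES_CMAKE_TARGET = "iree-benchmark-suites-long"
LONG_E2E_COMPILE_STATS_SUITES_CMAKE_TARGET = "iree-e2e-compile-stats-suites-long"

LONG_RUNNING = "long-running"    # benchmark_tags.LONG_RUNNING
COMPILE_STATS = "compile-stats"  # benchmark_tags.COMPILE_STATS

def pick_suite_target(gen_config_tags, compile_config_tags):
    # Choose the umbrella CMake target a compiled module should be attached to.
    is_long = LONG_RUNNING in gen_config_tags
    is_compile_stats = COMPILE_STATS in compile_config_tags
    if is_long:
        return (LONG_E2E_COMPILE_STATS_SUITES_CMAKE_TARGET
                if is_compile_stats else LONG_BENCHMARK_SUITES_CMAKE_TARGET)
    return (E2E_COMPILE_STATS_SUITES
            if is_compile_stats else BENCHMARK_SUITES_CMAKE_TARGET)

# A long-running model compiled with a "-compile-stats" compile config lands in
# the long compile-stats suite.
assert (pick_suite_target(["cuda", LONG_RUNNING], [COMPILE_STATS])
        == "iree-e2e-compile-stats-suites-long")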