Add XPU to binary build generation
chuanqi129 committed Jul 22, 2024
1 parent 52eb4b5 commit 926bbf3
Showing 3 changed files with 33 additions and 1 deletion.
5 changes: 5 additions & 0 deletions .github/workflows/build_wheels_linux.yml
@@ -171,6 +171,11 @@ jobs:
run: |
set -euxo pipefail
cat "${{ inputs.env-var-script }}" >> "${BUILD_ENV_FILE}"
- name: Add XPU Env Vars in Build Env File
if: ${{ matrix.gpu_arch_type == 'xpu' }}
run: |
echo "set +u" >> "${BUILD_ENV_FILE}"
echo "source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh" >> "${BUILD_ENV_FILE}"
- name: Install torch dependency
run: |
set -euxo pipefail
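For context, the added "Add XPU Env Vars in Build Env File" step runs only for XPU builds and appends two lines to the shared build environment file so that later build steps pick up the oneAPI toolchain. A rough Python equivalent of what the step does (the env-file usage and paths come from the diff; the helper name is made up for illustration):

import os

def add_xpu_env_vars(build_env_file: str, gpu_arch_type: str) -> None:
    # Mirrors the workflow step: only XPU builds get the oneAPI setup appended.
    if gpu_arch_type != "xpu":
        return
    with open(build_env_file, "a") as env:
        # "set +u" is presumably there so sourcing oneapi-vars.sh does not trip
        # over unset variables when the env file runs under "set -u".
        env.write("set +u\n")
        env.write("source /opt/intel/oneapi/pytorch-gpu-dev-0.5/oneapi-vars.sh\n")

add_xpu_env_vars(os.environ.get("BUILD_ENV_FILE", "/tmp/build_env.sh"), "xpu")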
5 changes: 5 additions & 0 deletions .github/workflows/generate_binary_build_matrix.yml
@@ -35,6 +35,10 @@ on:
description: "Build with CPU?"
default: "enable"
type: string
with-xpu:
description: "Build with XPU?"
default: "enable"
type: string
use-only-dl-pytorch-org:
description: "Use only download.pytorch.org when generating wheel install command?"
default: "false"
@@ -80,6 +84,7 @@ jobs:
WITH_CUDA: ${{ inputs.with-cuda }}
WITH_ROCM: ${{ inputs.with-rocm }}
WITH_CPU: ${{ inputs.with-cpu }}
WITH_XPU: ${{ inputs.with-xpu }}
# limit pull request builds to one version of python unless ciflow/binaries/all is applied to the workflow
# should not affect builds that are from events that are not the pull_request event
LIMIT_PR_BUILDS: ${{ github.event_name == 'pull_request' && !contains( github.event.pull_request.labels.*.name, 'ciflow/binaries/all') }}
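The plumbing mirrors the existing CPU/CUDA/ROCm switches: the workflow input with-xpu is forwarded as the WITH_XPU environment variable, which the generator script then reads as the default for its new --with-xpu option (see the Python diff below). A minimal sketch of that hand-off, with the constants assumed from the diff:

import os

ENABLE, DISABLE = "enable", "disable"

# Enabled unless the calling workflow opts out, matching the input default above.
with_xpu = os.getenv("WITH_XPU", ENABLE)
assert with_xpu in (ENABLE, DISABLE), f"unexpected WITH_XPU value: {with_xpu}"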
24 changes: 23 additions & 1 deletion tools/scripts/generate_binary_build_matrix.py
@@ -61,6 +61,7 @@
CUDA_AARCH64 = "cuda-aarch64"
CUDA = "cuda"
ROCM = "rocm"
XPU = "xpu"


CURRENT_NIGHTLY_VERSION = "2.5.0"
@@ -107,6 +108,8 @@ def arch_type(arch_version: str) -> str:
return CPU_AARCH64
elif arch_version == CUDA_AARCH64:
return CUDA_AARCH64
elif arch_version == XPU:
return XPU
else: # arch_version should always be CPU in this case
return CPU
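A condensed sketch of the dispatch this hunk extends, keeping only the branches visible in the diff (the real function also maps CUDA and ROCm version strings):

CPU = "cpu"
CPU_AARCH64 = "cpu-aarch64"
CUDA_AARCH64 = "cuda-aarch64"
XPU = "xpu"

def arch_type_sketch(arch_version: str) -> str:
    # CUDA and ROCm version handling elided for brevity.
    if arch_version == CPU_AARCH64:
        return CPU_AARCH64
    elif arch_version == CUDA_AARCH64:
        return CUDA_AARCH64
    elif arch_version == XPU:
        return XPU
    else:  # arch_version should always be CPU in this case
        return CPU

assert arch_type_sketch("xpu") == "xpu"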

@@ -160,6 +163,7 @@ def initialize_globals(channel: str, build_python_only: bool) -> None:
for gpu_arch in ROCM_ARCHES
},
CPU: "pytorch/manylinux-builder:cpu",
XPU: "pytorch/manylinux2_28-builder:xpu",
CPU_AARCH64: "pytorch/manylinuxaarch64-builder:cpu-aarch64",
CUDA_AARCH64: "pytorch/manylinuxaarch64-builder:cuda12.4",
}
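Note that the XPU wheel is built in a manylinux 2.28 container rather than the default manylinux image used for CPU. A hedged sketch of the image lookup after this change (the dictionary name is illustrative, and the per-version CUDA/ROCm entries are elided):

CONTAINER_IMAGES = {
    "cpu": "pytorch/manylinux-builder:cpu",
    "xpu": "pytorch/manylinux2_28-builder:xpu",
    "cpu-aarch64": "pytorch/manylinuxaarch64-builder:cpu-aarch64",
    "cuda-aarch64": "pytorch/manylinuxaarch64-builder:cuda12.4",
}

print(CONTAINER_IMAGES["xpu"])  # pytorch/manylinux2_28-builder:xpu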
@@ -199,6 +203,7 @@ def translate_desired_cuda(gpu_arch_type: str, gpu_arch_version: str) -> str:
CUDA_AARCH64: "cu124",
CUDA: f"cu{gpu_arch_version.replace('.', '')}",
ROCM: f"rocm{gpu_arch_version}",
XPU: "xpu",
}.get(gpu_arch_type, gpu_arch_version)
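With this entry, the "desired CUDA" label used in build identifiers is simply "xpu"; unlike cu124 or rocm6.x there is no version suffix. A sketch of the mapping as it reads after the change, reproduced from the diff context:

def translate_desired_cuda_sketch(gpu_arch_type: str, gpu_arch_version: str) -> str:
    return {
        "cuda-aarch64": "cu124",
        "cuda": f"cu{gpu_arch_version.replace('.', '')}",
        "rocm": f"rocm{gpu_arch_version}",
        "xpu": "xpu",
    }.get(gpu_arch_type, gpu_arch_version)

assert translate_desired_cuda_sketch("xpu", "") == "xpu"
assert translate_desired_cuda_sketch("cuda", "12.4") == "cu124"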


@@ -327,6 +332,7 @@ def generate_conda_matrix(
with_cuda: str,
with_rocm: str,
with_cpu: str,
with_xpu: str,
limit_pr_builds: bool,
use_only_dl_pytorch_org: bool,
use_split_build: bool = False,
@@ -383,6 +389,7 @@ def generate_libtorch_matrix(
with_cuda: str,
with_rocm: str,
with_cpu: str,
with_xpu: str,
limit_pr_builds: bool,
use_only_dl_pytorch_org: bool,
use_split_build: bool = False,
@@ -472,6 +479,7 @@ def generate_wheels_matrix(
with_cuda: str,
with_rocm: str,
with_cpu: str,
with_xpu: str,
limit_pr_builds: bool,
use_only_dl_pytorch_org: bool,
use_split_build: bool = False,
@@ -510,6 +518,10 @@ def generate_wheels_matrix(
if os == LINUX:
arches += ROCM_ARCHES

if with_xpu == ENABLE:
if os == LINUX:
arches += [XPU]

if limit_pr_builds:
python_versions = [python_versions[0]]

@@ -523,7 +535,7 @@
continue
gpu_arch_version = (
""
if arch_version in [CPU, CPU_AARCH64]
if arch_version in [CPU, CPU_AARCH64, XPU]
else arch_version
)
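Taken together, the wheels-matrix changes add "xpu" as a Linux-only arch and treat it like the CPU arches when computing gpu_arch_version, i.e. it resolves to an empty string because XPU carries no version component here. A compressed sketch of that logic under those assumptions:

ENABLE = "enable"
LINUX = "linux"
CPU, CPU_AARCH64, XPU = "cpu", "cpu-aarch64", "xpu"

def wheel_arches(os_name: str, with_cpu: str, with_xpu: str) -> list:
    # CUDA and ROCm arches are appended the same way in the real generator.
    arches = []
    if with_cpu == ENABLE:
        arches.append(CPU)
    if with_xpu == ENABLE and os_name == LINUX:
        arches.append(XPU)
    return arches

for arch_version in wheel_arches(LINUX, ENABLE, ENABLE):
    gpu_arch_version = "" if arch_version in [CPU, CPU_AARCH64, XPU] else arch_version
    print(arch_version, repr(gpu_arch_version))  # e.g. "xpu ''"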

@@ -579,6 +591,7 @@ def generate_build_matrix(
with_cuda: str,
with_rocm: str,
with_cpu: str,
with_xpu: str,
limit_pr_builds: str,
use_only_dl_pytorch_org: str,
build_python_only: str,
@@ -602,6 +615,7 @@ def generate_build_matrix(
with_cuda,
with_rocm,
with_cpu,
with_xpu,
limit_pr_builds == "true",
use_only_dl_pytorch_org == "true",
use_split_build == "true",
@@ -653,6 +667,13 @@ def main(args: List[str]) -> None:
choices=[ENABLE, DISABLE],
default=os.getenv("WITH_CPU", ENABLE),
)
parser.add_argument(
"--with-xpu",
help="Build with XPU?",
type=str,
choices=[ENABLE, DISABLE],
default=os.getenv("WITH_XPU", ENABLE),
)
# By default this is false for this script but expectation is that the caller
# workflow will default this to be true most of the time, where a pull
# request is synchronized and does not contain the label "ciflow/binaries/all"
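The new flag follows the pattern of the existing --with-cpu/--with-cuda/--with-rocm options, so a caller can opt out of XPU explicitly. A hypothetical local invocation that disables it (flag name and script path come from this diff; all other options keep their defaults):

import sys

cmd = [
    sys.executable,
    "tools/scripts/generate_binary_build_matrix.py",
    "--with-xpu",
    "disable",
]
print(" ".join(cmd))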
@@ -705,6 +726,7 @@ def main(args: List[str]) -> None:
options.with_cuda,
options.with_rocm,
options.with_cpu,
options.with_xpu,
options.limit_pr_builds,
options.use_only_dl_pytorch_org,
options.build_python_only,
