name: Update LLM Perf Benchmarks - CUDA PyTorch

on:
  workflow_dispatch:
  # For debugging purposes, the workflow is triggered manually for now.
  # push:
  #   branches:
  #     - "add-t4-for-llm-perf-leaderboard"
  # schedule:
  #   - cron: "0 */6 * * *"

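# Only one run per workflow and ref at a time; queuing a new run cancels any in-progress one.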
concurrency:
  cancel-in-progress: true
  group: ${{ github.workflow }}-${{ github.ref }}

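# Prebuilt optimum-benchmark CUDA image, pulled from the GitHub Container Registry.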
env:
  IMAGE: ghcr.io/huggingface/optimum-benchmark:latest-cuda

jobs:
  run_benchmarks:
    strategy:
      fail-fast: false
      matrix:
        subset: [unquantized, bnb, awq, gptq]
        machine:
          [
            # { name: 1xA10, runs-on: { group: "aws-g5-4xlarge-plus" } },
            { name: 1xT4, runs-on: { group: "aws-g4dn-2xlarge" } },
          ]
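    # The matrix fans out into one job per subset (4 jobs, all on the 1xT4 machine);
    # fail-fast: false lets the remaining subsets finish if one of them fails.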
    runs-on: ${{ matrix.machine.runs-on }}

    steps:
      - name: Checkout
        uses: actions/checkout@v4

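      # Benchmarks run inside the CUDA container rather than directly on the runner;
      # the step-level env vars below are forwarded into the container via the bare
      # --env flags (no value means "pass through from the host environment").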
      - name: Run benchmarks
        uses: addnab/docker-run-action@v3
        env:
          SUBSET: ${{ matrix.subset }}
          MACHINE: ${{ matrix.machine.name }}
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
        with:
          image: ${{ env.IMAGE }}
          options: |
            --rm
            --gpus all
            --shm-size 64G
            --env SUBSET
            --env MACHINE
            --env HF_TOKEN
            --env MKL_THREADING_LAYER=GNU
            --env HF_HUB_ENABLE_HF_TRANSFER=1
            --volume ${{ github.workspace }}:/workspace
            --workdir /workspace
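          # Inside the container: install the benchmark dependencies, install the
          # checked-out repo in editable mode, then launch the update script.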
          run: |
            pip install packaging && pip install flash-attn einops scipy auto-gptq optimum bitsandbytes autoawq codecarbon
            pip install -U transformers huggingface_hub[hf_transfer]
            pip install -e .
            python llm_perf/update_llm_perf_cuda_pytorch.py
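
# A minimal sketch of a manual trigger (workflow_dispatch), assuming the GitHub CLI is installed:
#   gh workflow run "Update LLM Perf Benchmarks - CUDA PyTorch"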