# Workflow file for this run — PR #552: "Add llama.cpp backend"
# CI workflow: runs the miscellaneous API test suite on CPU-only runners.
name: API Misc Tests

# Triggered manually, or on pushes / PRs against main that touch the
# benchmark package, docker setup, tests, packaging, or this workflow itself.
on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - ".github/workflows/test_api_misc.yaml"
      - "optimum_benchmark/**"
      - "docker/**"
      - "tests/**"
      - "setup.py"
  pull_request:
    branches:
      - main
    paths:
      - ".github/workflows/test_api_misc.yaml"
      - "optimum_benchmark/**"
      - "docker/**"
      - "tests/**"
      - "setup.py"

# Cancel in-flight runs for the same PR (or ref, for pushes) when a new one starts.
concurrency:
  cancel-in-progress: true
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

jobs:
  run_api_misc_tests:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up Python 3.10
        uses: actions/setup-python@v3
        with:
          # Quoted: an unquoted 3.10 would be parsed as the float 3.1.
          python-version: "3.10"

      - name: Install requirements
        run: |
          pip install --upgrade pip
          pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
          pip install -e .[testing,timm,diffusers,codecarbon]

      - name: Run tests
        env:
          HF_TOKEN: ${{ secrets.HF_TOKEN }}
          PUSH_REPO_ID: optimum-benchmark/misc
        run: |
          pytest -s -k "api and not (cpu or cuda)"