[Cherry-Pick][Feature Branch][DeepSparse Evaluation API] Update lm-eval, perplexity, additional datasets #1596

Merged (1 commit, Feb 12, 2024)
setup.py (3 changes: 2 additions & 1 deletion)

@@ -150,6 +150,7 @@ def _parse_requirements_file(file_path):
"accelerate<0.26",
"scikit-learn",
"seqeval",
"evaluate",
]
_sentence_transformers_integration_deps = ["optimum-deepsparse"] + _torch_deps

@@ -310,7 +311,7 @@ def _setup_entry_points() -> Dict:
f"deepsparse.image_classification.eval={ic_eval}",
"deepsparse.license=deepsparse.license:main",
"deepsparse.validate_license=deepsparse.license:validate_license_cli",
"deepsparse.eval=deepsparse.evaluation.cli:main",
"deepsparse.evaluate=deepsparse.evaluation.cli:main",
]
}

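For context on the two setup.py hunks above: the `evaluate` package is added to the integration dependency list shown, and the console script that exposes the evaluation CLI is renamed from `deepsparse.eval` to `deepsparse.evaluate`. A minimal sketch of what the renamed entry point means at install time (illustrative only; the real mapping is built inside `_setup_entry_points`):

```python
# Illustrative sketch, not the literal setup.py contents.
entry_points = {
    "console_scripts": [
        # old command name, removed by this PR:
        #   "deepsparse.eval=deepsparse.evaluation.cli:main",
        # new command name, pointing at the same Click command:
        "deepsparse.evaluate=deepsparse.evaluation.cli:main",
    ]
}
# After `pip install`, running `deepsparse.evaluate ...` on the command line
# invokes deepsparse.evaluation.cli:main with the remaining arguments.
```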
src/deepsparse/evaluation/cli.py (14 changes: 6 additions & 8 deletions)

@@ -20,7 +20,7 @@
 Module for evaluating models on the various evaluation integrations

 OPTIONS:
-    --model_path MODEL_PATH
+    MODEL_PATH
         A path to an ONNX model, local directory containing ONNX model
         (including all the auxiliary files) or a SparseZoo stub
     -d DATASET, --dataset DATASET
@@ -72,7 +72,7 @@

 from deepsparse.evaluation.evaluator import evaluate
 from deepsparse.evaluation.results import Result, save_result
-from deepsparse.evaluation.utils import args_to_dict, get_save_path
+from deepsparse.evaluation.utils import get_save_path, parse_kwarg_tuples
 from deepsparse.operators.engine_operator import (
     DEEPSPARSE_ENGINE,
     ORT_ENGINE,
@@ -88,12 +88,10 @@
         ignore_unknown_options=True,
     )
 )
-@click.option(
-    "--model_path",
+@click.argument(
+    "model_path",
     type=click.Path(dir_okay=True, file_okay=True),
     required=True,
-    help="A path to an ONNX model, local directory containing ONNX model"
-    "(including all the auxiliary files) or a SparseZoo stub",
 )
 @click.option(
     "-d",
@@ -178,7 +176,7 @@ def main(
     # join datasets to a list if multiple datasets are passed
     datasets = list(dataset) if not isinstance(dataset, str) else dataset
     # format kwargs to a dict
-    integration_args = args_to_dict(integration_args)
+    integration_args = parse_kwarg_tuples(integration_args)

     _LOGGER.info(
         f"Creating {engine_type} pipeline to evaluate from model path: {model_path}"
@@ -203,7 +201,7 @@
         **integration_args,
     )

-    _LOGGER.info(f"Evaluation done. Results:\n{result}")
+    _LOGGER.info(f"Evaluation done. Results:\n{result.formatted}")

     save_path = get_save_path(
         save_path=save_path,
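The net effect of swapping `click.option` for `click.argument` above is that the model path becomes a positional argument instead of a `--model_path` flag, which is why the docstring hunk earlier drops the flag form. A toy Click command sketching the before and after invocation (a hypothetical stand-in, not the full deepsparse CLI):

```python
import click


@click.command()
@click.argument("model_path", type=click.Path(dir_okay=True, file_okay=True), required=True)
@click.option("-d", "--dataset", multiple=True, help="dataset(s) to evaluate on")
def evaluate(model_path, dataset):
    # new style:  deepsparse.evaluate <model_path> -d wikitext
    # old style:  deepsparse.eval --model_path <model_path> -d wikitext
    click.echo(f"model={model_path} datasets={list(dataset)}")


if __name__ == "__main__":
    evaluate()
```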
src/deepsparse/evaluation/evaluator.py (1 change: 0 additions & 1 deletion)

@@ -65,7 +65,6 @@ def evaluate(
     return eval_integration(
         pipeline=pipeline,
         datasets=datasets,
-        engine_type=engine_type,
         batch_size=batch_size,
         splits=splits,
         metrics=metrics,
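Dropping `engine_type` from the `eval_integration(...)` call means integrations no longer receive the engine as a separate keyword; the pipeline object passed in already carries that information. A hypothetical integration signature compatible with the call above (names are for illustration only):

```python
# Hypothetical integration entry point; real ones live under
# src/deepsparse/evaluation/integrations/.
def eval_integration(pipeline, datasets, batch_size, splits=None, metrics=None, **kwargs):
    # engine_type is no longer forwarded by evaluate(); an integration that
    # needs it would derive it from the pipeline itself.
    ...
```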
src/deepsparse/evaluation/integrations/__init__.py (6 changes: 3 additions & 3 deletions)

@@ -15,7 +15,7 @@
 # flake8: noqa: F401


-def try_import_lm_evaluation_harness(raise_error=False):
+def try_import_lm_evaluation_harness(raise_error=True):
     try:
         import lm_eval

@@ -24,11 +24,11 @@ def try_import_lm_evaluation_harness(raise_error=False):
         if raise_error:
             raise ImportError(
                 "Unable to import lm_eval. "
-                "To install run 'pip install "
-                "git+https://github.com/EleutherAI/lm-evaluation-harness@b018a7d51'"
+                "To install run 'pip install lm-eval==0.4.0'"
             )
         return False


 if try_import_lm_evaluation_harness(raise_error=False):
     from .lm_evaluation_harness import *
+    from .perplexity import *
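The helper above now defaults to raising on a missing dependency, while the module-level check keeps `raise_error=False` so importing `deepsparse.evaluation.integrations` still degrades gracefully when lm-eval is absent. A small usage sketch of the guard, assuming the layout shown above:

```python
from deepsparse.evaluation.integrations import try_import_lm_evaluation_harness

# With the new default (raise_error=True) a missing lm_eval package raises an
# ImportError pointing at `pip install lm-eval==0.4.0`; raise_error=False just
# reports availability instead.
if try_import_lm_evaluation_harness(raise_error=False):
    print("lm-evaluation-harness integration is available")
else:
    print("lm_eval not installed; run: pip install lm-eval==0.4.0")
```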