From 6947d00799a0157eb4631984b0b48a90fcaeec36 Mon Sep 17 00:00:00 2001
From: elronbandel
Date: Tue, 24 Sep 2024 16:59:01 +0300
Subject: [PATCH 1/2] Add concurrency_limit parameter to WMLInferenceEngine

Signed-off-by: elronbandel
---
 src/unitxt/inference.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/unitxt/inference.py b/src/unitxt/inference.py
index 79eae04a6..b193af823 100644
--- a/src/unitxt/inference.py
+++ b/src/unitxt/inference.py
@@ -592,7 +592,7 @@ class WMLInferenceEngine(
     }
     data_classification_policy = ["public", "proprietary"]
     parameters: Optional[WMLInferenceEngineParams] = None
-
+    concurrency_limit: int = 10
     _client: Any = InternalField(default=None, name="WML client")
 
     def verify(self):
@@ -666,6 +666,7 @@ def _infer(self, dataset):
         return model.generate_text(
             prompt=dataset["source"],
             params=self.to_dict([WMLInferenceEngineParamsMixin], keep_empty=False),
+            concurrency_limit=self.concurrency_limit,
         )

From 943f169a64fb6411c9501c5b1fae94896d964e04 Mon Sep 17 00:00:00 2001
From: Allysson Oliveira
Date: Tue, 24 Sep 2024 17:25:36 +0300
Subject: [PATCH 2/2] update

Signed-off-by: elronbandel
---
 src/unitxt/inference.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/unitxt/inference.py b/src/unitxt/inference.py
index b193af823..d2afad260 100644
--- a/src/unitxt/inference.py
+++ b/src/unitxt/inference.py
@@ -559,6 +559,7 @@ class WMLInferenceEngine(
         parameters (WMLInferenceEngineParams, optional): Instance of WMLInferenceEngineParams
             which defines inference parameters and their values. Deprecated attribute, please
             pass respective parameters directly to the WMLInferenceEngine class instead.
+        concurrency_limit (int): Number of requests that will be sent in parallel; the maximum is 10.
 
     Examples:
         from .api import load_dataset
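
Reviewer note: the snippet below is a minimal usage sketch of the new parameter, not part of the patch itself. The model id, the max_new_tokens value, and the input instance are illustrative assumptions, and running it requires valid watsonx.ai credentials picked up by the WML client (e.g. from environment variables).

    # Minimal sketch: model id and generation parameter are assumptions for
    # illustration only; credentials must be configured for the WML client.
    from unitxt.inference import WMLInferenceEngine

    engine = WMLInferenceEngine(
        model_name="google/flan-t5-xl",   # hypothetical watsonx.ai model id
        max_new_tokens=128,               # forwarded via WMLInferenceEngineParamsMixin
        concurrency_limit=5,              # new in this patch: parallel requests, capped at 10
    )

    # Instances carry the prompt under the "source" key, matching the
    # dataset["source"] access in _infer above.
    predictions = engine.infer([{"source": "Translate to French: Hello, world!"}])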