diff --git a/examples/research_projects/distillation/grouped_batch_sampler.py b/examples/research_projects/distillation/grouped_batch_sampler.py index fd126b13b58ee7..e25def738a8483 100644 --- a/examples/research_projects/distillation/grouped_batch_sampler.py +++ b/examples/research_projects/distillation/grouped_batch_sampler.py @@ -59,7 +59,7 @@ class GroupedBatchSampler(BatchSampler): def __init__(self, sampler, group_ids, batch_size): if not isinstance(sampler, Sampler): - raise ValueError( + raise TypeError( "sampler should be an instance of torch.utils.data.Sampler, but got sampler={}".format(sampler) ) self.sampler = sampler diff --git a/examples/research_projects/tapex/wikisql_utils.py b/examples/research_projects/tapex/wikisql_utils.py index 3351bddf019448..13d10e091a10c1 100644 --- a/examples/research_projects/tapex/wikisql_utils.py +++ b/examples/research_projects/tapex/wikisql_utils.py @@ -48,7 +48,7 @@ def convert_to_float(value): if isinstance(value, int): return float(value) if not isinstance(value, str): - raise ValueError("Argument value is not a string. Can't parse it as float") + raise TypeError("Argument value is not a string. Can't parse it as float") sanitized = value try: @@ -158,7 +158,7 @@ def _respect_conditions(table, row, conditions): cmp_value = _normalize_for_match(cmp_value) if not isinstance(table_value, type(cmp_value)): - raise ValueError("Type difference {} != {}".format(type(table_value), type(cmp_value))) + raise TypeError("Type difference {} != {}".format(type(table_value), type(cmp_value))) if not _compare(cond.operator, table_value, cmp_value): return False diff --git a/src/transformers/agents/agent_types.py b/src/transformers/agents/agent_types.py index 0b4999b7f76d3c..114b6de01c3333 100644 --- a/src/transformers/agents/agent_types.py +++ b/src/transformers/agents/agent_types.py @@ -107,7 +107,7 @@ def __init__(self, value): elif isinstance(value, np.ndarray): self._tensor = torch.tensor(value) else: - raise ValueError(f"Unsupported type for {self.__class__.__name__}: {type(value)}") + raise TypeError(f"Unsupported type for {self.__class__.__name__}: {type(value)}") def _ipython_display_(self, include=None, exclude=None): """ diff --git a/src/transformers/configuration_utils.py b/src/transformers/configuration_utils.py index c6de824339bbc0..2f84bc29aee25d 100755 --- a/src/transformers/configuration_utils.py +++ b/src/transformers/configuration_utils.py @@ -1004,7 +1004,7 @@ def update_from_string(self, update_str: str): elif isinstance(old_v, float): v = float(v) elif not isinstance(old_v, str): - raise ValueError( + raise TypeError( f"You can only update int, float, bool or string values in the config, got {v} for key {k}" ) diff --git a/src/transformers/data/processors/xnli.py b/src/transformers/data/processors/xnli.py index 459c5bc3a6a38e..4d8ec17a8345db 100644 --- a/src/transformers/data/processors/xnli.py +++ b/src/transformers/data/processors/xnli.py @@ -47,11 +47,11 @@ def get_train_examples(self, data_dir): text_b = line[1] label = "contradiction" if line[2] == "contradictory" else line[2] if not isinstance(text_a, str): - raise ValueError(f"Training input {text_a} is not a string") + raise TypeError(f"Training input {text_a} is not a string") if not isinstance(text_b, str): - raise ValueError(f"Training input {text_b} is not a string") + raise TypeError(f"Training input {text_b} is not a string") if not isinstance(label, str): - raise ValueError(f"Training label {label} is not a string") + raise TypeError(f"Training label {label} is not a 
string") examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples @@ -70,11 +70,11 @@ def get_test_examples(self, data_dir): text_b = line[7] label = line[1] if not isinstance(text_a, str): - raise ValueError(f"Training input {text_a} is not a string") + raise TypeError(f"Training input {text_a} is not a string") if not isinstance(text_b, str): - raise ValueError(f"Training input {text_b} is not a string") + raise TypeError(f"Training input {text_b} is not a string") if not isinstance(label, str): - raise ValueError(f"Training label {label} is not a string") + raise TypeError(f"Training label {label} is not a string") examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label)) return examples diff --git a/src/transformers/generation/beam_constraints.py b/src/transformers/generation/beam_constraints.py index b53c4512427a87..e6462f322c49f7 100644 --- a/src/transformers/generation/beam_constraints.py +++ b/src/transformers/generation/beam_constraints.py @@ -156,7 +156,7 @@ def advance(self): def does_advance(self, token_id: int): if not isinstance(token_id, int): - raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") + raise TypeError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") if self.completed: return False @@ -165,7 +165,7 @@ def does_advance(self, token_id: int): def update(self, token_id: int): if not isinstance(token_id, int): - raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") + raise TypeError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}") stepped = False completed = False @@ -300,7 +300,7 @@ def advance(self): def does_advance(self, token_id: int): if not isinstance(token_id, int): - raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") + raise TypeError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") next_tokens = self.trie.next_tokens(self.current_seq) @@ -308,7 +308,7 @@ def does_advance(self, token_id: int): def update(self, token_id: int): if not isinstance(token_id, int): - raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") + raise TypeError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}") stepped = False completed = False @@ -432,7 +432,7 @@ def reset(self, token_ids: Optional[List[int]]): def add(self, token_id: int): if not isinstance(token_id, int): - raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.") + raise TypeError(f"`token_id` should be an `int`, but is `{token_id}`.") complete, stepped = False, False diff --git a/src/transformers/generation/utils.py b/src/transformers/generation/utils.py index 51019da9a6b378..9d3a92d268819a 100644 --- a/src/transformers/generation/utils.py +++ b/src/transformers/generation/utils.py @@ -4281,7 +4281,7 @@ def _split(data, full_batch_size: int, split_size: int = None): for i in range(0, full_batch_size, split_size) ] else: - raise ValueError(f"Unexpected attribute type: {type(data)}") + raise TypeError(f"Unexpected attribute type: {type(data)}") def _split_model_inputs( @@ -4388,7 +4388,7 @@ def _concat(data): # If the elements are integers or floats, return a tensor return torch.tensor(data) else: - raise ValueError(f"Unexpected attribute type: {type(data[0])}") + raise TypeError(f"Unexpected attribute type: 
{type(data[0])}") # Use a dictionary comprehension to gather attributes from all objects and concatenate them concatenated_data = { diff --git a/src/transformers/image_processing_base.py b/src/transformers/image_processing_base.py index 6c80aee0164722..9b314f83c11fb1 100644 --- a/src/transformers/image_processing_base.py +++ b/src/transformers/image_processing_base.py @@ -544,7 +544,7 @@ def fetch_images(self, image_url_or_urls: Union[str, List[str]]): response.raise_for_status() return Image.open(BytesIO(response.content)) else: - raise ValueError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}") + raise TypeError(f"only a single or a list of entries is supported but got type={type(image_url_or_urls)}") ImageProcessingMixin.push_to_hub = copy_func(ImageProcessingMixin.push_to_hub) diff --git a/src/transformers/image_transforms.py b/src/transformers/image_transforms.py index 4e4812879eed1c..580570f6066278 100644 --- a/src/transformers/image_transforms.py +++ b/src/transformers/image_transforms.py @@ -75,7 +75,7 @@ def to_channel_dimension_format( `np.ndarray`: The image with the channel dimension set to `channel_dim`. """ if not isinstance(image, np.ndarray): - raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") + raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}") if input_channel_dim is None: input_channel_dim = infer_channel_dimension_format(image) @@ -121,7 +121,7 @@ def rescale( `np.ndarray`: The rescaled image. """ if not isinstance(image, np.ndarray): - raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") + raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}") rescaled_image = image * scale if data_format is not None: @@ -453,7 +453,7 @@ def center_crop( return_numpy = True if return_numpy is None else return_numpy if not isinstance(image, np.ndarray): - raise ValueError(f"Input image must be of type np.ndarray, got {type(image)}") + raise TypeError(f"Input image must be of type np.ndarray, got {type(image)}") if not isinstance(size, Iterable) or len(size) != 2: raise ValueError("size must have 2 elements representing the height and width of the output image") diff --git a/src/transformers/image_utils.py b/src/transformers/image_utils.py index 13edcb3a2ad43b..4449b602491ad9 100644 --- a/src/transformers/image_utils.py +++ b/src/transformers/image_utils.py @@ -377,7 +377,7 @@ def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] = elif isinstance(image, PIL.Image.Image): image = image else: - raise ValueError( + raise TypeError( "Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path, or a PIL image." ) image = PIL.ImageOps.exif_transpose(image) diff --git a/src/transformers/integrations/awq.py b/src/transformers/integrations/awq.py index 30427aa405dd56..550c23fde3d4ad 100644 --- a/src/transformers/integrations/awq.py +++ b/src/transformers/integrations/awq.py @@ -199,7 +199,7 @@ def get_modules_to_fuse(model, quantization_config): The quantization configuration to use. 
""" if not isinstance(model, PreTrainedModel): - raise ValueError(f"The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}") + raise TypeError(f"The model should be an instance of `PreTrainedModel`, got {model.__class__.__name__}") # Always default to `quantization_config.modules_to_fuse` if quantization_config.modules_to_fuse is not None: diff --git a/src/transformers/integrations/peft.py b/src/transformers/integrations/peft.py index a543315410c785..923aa59e4184dc 100644 --- a/src/transformers/integrations/peft.py +++ b/src/transformers/integrations/peft.py @@ -262,9 +262,7 @@ def add_adapter(self, adapter_config, adapter_name: Optional[str] = None) -> Non raise ValueError(f"Adapter with name {adapter_name} already exists. Please use a different name.") if not isinstance(adapter_config, PeftConfig): - raise ValueError( - f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead." - ) + raise TypeError(f"adapter_config should be an instance of PeftConfig. Got {type(adapter_config)} instead.") # Retrieve the name or path of the model, one could also use self.config._name_or_path # but to be consistent with what we do in PEFT: https://github.com/huggingface/peft/blob/6e783780ca9df3a623992cc4d1d665001232eae0/src/peft/mapping.py#L100 diff --git a/src/transformers/modeling_tf_utils.py b/src/transformers/modeling_tf_utils.py index 0ad5dd0396194a..3d7658ba372130 100644 --- a/src/transformers/modeling_tf_utils.py +++ b/src/transformers/modeling_tf_utils.py @@ -1209,7 +1209,7 @@ def build(self, input_shape=None): def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) if not isinstance(config, PretrainedConfig): - raise ValueError( + raise TypeError( f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class " "`PretrainedConfig`. To create a model from a pretrained model use " f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`" diff --git a/src/transformers/models/align/modeling_align.py b/src/transformers/models/align/modeling_align.py index d6e6023a26f768..1b744d0f208d46 100644 --- a/src/transformers/models/align/modeling_align.py +++ b/src/transformers/models/align/modeling_align.py @@ -1418,13 +1418,13 @@ def __init__(self, config: AlignConfig): super().__init__(config) if not isinstance(config.text_config, AlignTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type AlignTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, AlignVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type AlignVisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/altclip/modeling_altclip.py b/src/transformers/models/altclip/modeling_altclip.py index 10c9e10491bbda..f9856ef701f9e0 100755 --- a/src/transformers/models/altclip/modeling_altclip.py +++ b/src/transformers/models/altclip/modeling_altclip.py @@ -1466,12 +1466,12 @@ def __init__(self, config: AltCLIPConfig): super().__init__(config) if not isinstance(config.vision_config, AltCLIPVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type AltCLIPVisionConfig but is of type" f" {type(config.vision_config)}." 
) if not isinstance(config.text_config, AltCLIPTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type AltCLIPTextConfig but is of type" f" {type(config.text_config)}." ) diff --git a/src/transformers/models/bark/processing_bark.py b/src/transformers/models/bark/processing_bark.py index a9bf55b51f6015..53715f3260422c 100644 --- a/src/transformers/models/bark/processing_bark.py +++ b/src/transformers/models/bark/processing_bark.py @@ -211,7 +211,7 @@ def _validate_voice_preset_dict(self, voice_preset: Optional[dict] = None): raise ValueError(f"Voice preset unrecognized, missing {key} as a key.") if not isinstance(voice_preset[key], np.ndarray): - raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.") + raise TypeError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.") if len(voice_preset[key].shape) != self.preset_shape[key]: raise ValueError(f"{key} voice preset must be a {str(self.preset_shape[key])}D ndarray.") diff --git a/src/transformers/models/blip/modeling_blip.py b/src/transformers/models/blip/modeling_blip.py index a8f20ace6bd862..46e3a6005b0af6 100644 --- a/src/transformers/models/blip/modeling_blip.py +++ b/src/transformers/models/blip/modeling_blip.py @@ -755,13 +755,13 @@ def __init__(self, config: BlipConfig): super().__init__(config) if not isinstance(config.text_config, BlipTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type BlipTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, BlipVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type BlipVisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/blip/modeling_tf_blip.py b/src/transformers/models/blip/modeling_tf_blip.py index 1557677eb3fbf2..6c9942b73acefb 100644 --- a/src/transformers/models/blip/modeling_tf_blip.py +++ b/src/transformers/models/blip/modeling_tf_blip.py @@ -794,13 +794,13 @@ def __init__(self, config: BlipConfig, *args, **kwargs): super().__init__(*args, **kwargs) if not isinstance(config.text_config, BlipTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type BlipTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, BlipVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type BlipVisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/chameleon/processing_chameleon.py b/src/transformers/models/chameleon/processing_chameleon.py index 559cac62e3d5a7..1480808336d14e 100644 --- a/src/transformers/models/chameleon/processing_chameleon.py +++ b/src/transformers/models/chameleon/processing_chameleon.py @@ -113,7 +113,7 @@ def __call__( if isinstance(text, str): text = [text] elif not isinstance(text, list) and not isinstance(text[0], str): - raise ValueError("Invalid input text. Please provide a string, or a list of strings") + raise TypeError("Invalid input text. 
Please provide a string, or a list of strings") # Replace the image token with the expanded image token sequence prompt_strings = [] diff --git a/src/transformers/models/chinese_clip/modeling_chinese_clip.py b/src/transformers/models/chinese_clip/modeling_chinese_clip.py index 801969c465bfb0..6fbd9459f5ad71 100644 --- a/src/transformers/models/chinese_clip/modeling_chinese_clip.py +++ b/src/transformers/models/chinese_clip/modeling_chinese_clip.py @@ -1341,13 +1341,13 @@ def __init__(self, config: ChineseCLIPConfig): super().__init__(config) if not isinstance(config.text_config, ChineseCLIPTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type ChineseCLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, ChineseCLIPVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type ChineseCLIPVisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/clap/modeling_clap.py b/src/transformers/models/clap/modeling_clap.py index 3e83daa942c022..939032f2c894cc 100644 --- a/src/transformers/models/clap/modeling_clap.py +++ b/src/transformers/models/clap/modeling_clap.py @@ -1928,13 +1928,13 @@ def __init__(self, config: ClapConfig): super().__init__(config) if not isinstance(config.text_config, ClapTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type ClapTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.audio_config, ClapAudioConfig): - raise ValueError( + raise TypeError( "config.audio_config is expected to be of type ClapAudioConfig but is of type" f" {type(config.audio_config)}." ) diff --git a/src/transformers/models/clip/modeling_clip.py b/src/transformers/models/clip/modeling_clip.py index b96acfc0936c1d..ee85fe3125873b 100644 --- a/src/transformers/models/clip/modeling_clip.py +++ b/src/transformers/models/clip/modeling_clip.py @@ -1119,13 +1119,13 @@ def __init__(self, config: CLIPConfig): super().__init__(config) if not isinstance(config.text_config, CLIPTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type CLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, CLIPVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type CLIPVisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/clip/modeling_tf_clip.py b/src/transformers/models/clip/modeling_tf_clip.py index b728da52c222b4..ca5f4aede21854 100644 --- a/src/transformers/models/clip/modeling_tf_clip.py +++ b/src/transformers/models/clip/modeling_tf_clip.py @@ -825,13 +825,13 @@ def __init__(self, config: CLIPConfig, **kwargs): super().__init__(**kwargs) if not isinstance(config.text_config, CLIPTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type CLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, CLIPVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type CLIPVisionConfig but is of type" f" {type(config.vision_config)}." 
) diff --git a/src/transformers/models/clipseg/modeling_clipseg.py b/src/transformers/models/clipseg/modeling_clipseg.py index af7b94a10f4d7c..97fcf3d1f2b3e6 100644 --- a/src/transformers/models/clipseg/modeling_clipseg.py +++ b/src/transformers/models/clipseg/modeling_clipseg.py @@ -924,13 +924,13 @@ def __init__(self, config: CLIPSegConfig): super().__init__(config) if not isinstance(config.text_config, CLIPSegTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type CLIPSegTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, CLIPSegVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type CLIPSegVisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/clvp/modeling_clvp.py b/src/transformers/models/clvp/modeling_clvp.py index a673d64614d786..4124e380a3d73d 100644 --- a/src/transformers/models/clvp/modeling_clvp.py +++ b/src/transformers/models/clvp/modeling_clvp.py @@ -1513,19 +1513,19 @@ def __init__(self, config: ClvpConfig): super().__init__(config) if not isinstance(config.text_config, ClvpEncoderConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type `ClvpEncoderConfig` but is of type" f" {type(config.text_config)}." ) if not isinstance(config.speech_config, ClvpEncoderConfig): - raise ValueError( + raise TypeError( "config.speech_config is expected to be of type `ClvpEncoderConfig` but is of type" f" {type(config.speech_config)}." ) if not isinstance(config.decoder_config, ClvpDecoderConfig): - raise ValueError( + raise TypeError( "config.decoder_config is expected to be of type `ClvpDecoderConfig` but is of type" f" {type(config.decoder_config)}." ) diff --git a/src/transformers/models/deberta_v2/tokenization_deberta_v2.py b/src/transformers/models/deberta_v2/tokenization_deberta_v2.py index 2876ac7660493c..6ff689f80a5c1b 100644 --- a/src/transformers/models/deberta_v2/tokenization_deberta_v2.py +++ b/src/transformers/models/deberta_v2/tokenization_deberta_v2.py @@ -518,4 +518,4 @@ def convert_to_unicode(text): elif isinstance(text, bytes): return text.decode("utf-8", "ignore") else: - raise ValueError(f"Unsupported string type: {type(text)}") + raise TypeError(f"Unsupported string type: {type(text)}") diff --git a/src/transformers/models/depth_anything/modeling_depth_anything.py b/src/transformers/models/depth_anything/modeling_depth_anything.py index 0b1ef77c6a732a..e37f0a3eaf7cbf 100644 --- a/src/transformers/models/depth_anything/modeling_depth_anything.py +++ b/src/transformers/models/depth_anything/modeling_depth_anything.py @@ -298,7 +298,7 @@ def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_wi List of hidden states from the backbone. 
""" if not isinstance(hidden_states, (tuple, list)): - raise ValueError("hidden_states should be a tuple or list of tensors") + raise TypeError("hidden_states should be a tuple or list of tensors") if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.") diff --git a/src/transformers/models/dpt/modeling_dpt.py b/src/transformers/models/dpt/modeling_dpt.py index b2b88855669a76..b3e4b86a2a49dc 100755 --- a/src/transformers/models/dpt/modeling_dpt.py +++ b/src/transformers/models/dpt/modeling_dpt.py @@ -1002,7 +1002,7 @@ def forward(self, hidden_states: List[torch.Tensor], patch_height=None, patch_wi List of hidden states from the backbone. """ if not isinstance(hidden_states, (tuple, list)): - raise ValueError("hidden_states should be a tuple or list of tensors") + raise TypeError("hidden_states should be a tuple or list of tensors") if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.") diff --git a/src/transformers/models/esm/openfold_utils/chunk_utils.py b/src/transformers/models/esm/openfold_utils/chunk_utils.py index 301721d135ee4d..16131b8590954b 100644 --- a/src/transformers/models/esm/openfold_utils/chunk_utils.py +++ b/src/transformers/models/esm/openfold_utils/chunk_utils.py @@ -32,7 +32,7 @@ def _fetch_dims(tree: Union[dict, list, tuple, torch.Tensor]) -> List[Tuple[int, elif isinstance(tree, torch.Tensor): shapes.append(tree.shape) else: - raise ValueError("Not supported") + raise TypeError("Not supported") return shapes @@ -302,7 +302,7 @@ def assign(d1: dict, d2: dict) -> None: else: out[i : i + chunk_size] = output_chunk else: - raise ValueError("Not supported") + raise TypeError("Not supported") i += chunk_size diff --git a/src/transformers/models/esm/openfold_utils/residue_constants.py b/src/transformers/models/esm/openfold_utils/residue_constants.py index 8f0ad3b50c6505..200e0d421b8386 100644 --- a/src/transformers/models/esm/openfold_utils/residue_constants.py +++ b/src/transformers/models/esm/openfold_utils/residue_constants.py @@ -394,7 +394,7 @@ def map_structure_with_atom_order(in_list: list, first_call: bool = True) -> lis elif isinstance(in_list[i], str): in_list[i] = atom_order[in_list[i]] else: - raise ValueError("Unexpected type when mapping nested lists!") + raise TypeError("Unexpected type when mapping nested lists!") return in_list diff --git a/src/transformers/models/esm/openfold_utils/tensor_utils.py b/src/transformers/models/esm/openfold_utils/tensor_utils.py index 20ee34b236f177..efe72e4905b81f 100644 --- a/src/transformers/models/esm/openfold_utils/tensor_utils.py +++ b/src/transformers/models/esm/openfold_utils/tensor_utils.py @@ -134,7 +134,7 @@ def tree_map(fn, tree, leaf_type): return fn(tree) else: print(type(tree)) - raise ValueError("Not supported") + raise TypeError("Not supported") tensor_tree_map = partial(tree_map, leaf_type=torch.Tensor) diff --git a/src/transformers/models/flava/modeling_flava.py b/src/transformers/models/flava/modeling_flava.py index dbc4e51703847a..314925789ce1f4 100644 --- a/src/transformers/models/flava/modeling_flava.py +++ b/src/transformers/models/flava/modeling_flava.py @@ -1181,19 +1181,19 @@ def __init__(self, config: FlavaConfig): super().__init__(config) if not isinstance(config.text_config, FlavaTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type FlavaTextConfig 
but is of type" f" {type(config.text_config)}." ) if not isinstance(config.image_config, FlavaImageConfig): - raise ValueError( + raise TypeError( "config.image_config is expected to be of type FlavaImageConfig but is of type" f" {type(config.image_config)}." ) if not isinstance(config.multimodal_config, FlavaMultimodalConfig): - raise ValueError( + raise TypeError( "config.multimodal_config is expected to be of type FlavaMultimodalConfig but " + f"is of type {type(config.multimodal_config)}." ) diff --git a/src/transformers/models/groupvit/modeling_groupvit.py b/src/transformers/models/groupvit/modeling_groupvit.py index 32e1d777cb7dad..2a0d4f3c0e4e2b 100644 --- a/src/transformers/models/groupvit/modeling_groupvit.py +++ b/src/transformers/models/groupvit/modeling_groupvit.py @@ -1302,13 +1302,13 @@ def __init__(self, config: GroupViTConfig): super().__init__(config) if not isinstance(config.text_config, GroupViTTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type GroupViTTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, GroupViTVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type GroupViTVisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/groupvit/modeling_tf_groupvit.py b/src/transformers/models/groupvit/modeling_tf_groupvit.py index f06c5f57f83fb3..b5838a5264f69d 100644 --- a/src/transformers/models/groupvit/modeling_tf_groupvit.py +++ b/src/transformers/models/groupvit/modeling_tf_groupvit.py @@ -1443,13 +1443,13 @@ def __init__(self, config: GroupViTConfig, **kwargs): super().__init__(**kwargs) if not isinstance(config.text_config, GroupViTTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type GroupViTTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, GroupViTVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type GroupViTVisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/llava_next/image_processing_llava_next.py b/src/transformers/models/llava_next/image_processing_llava_next.py index 6295fb9562458b..f744b9fcf9c1cd 100644 --- a/src/transformers/models/llava_next/image_processing_llava_next.py +++ b/src/transformers/models/llava_next/image_processing_llava_next.py @@ -513,7 +513,7 @@ def get_image_patches( List[np.array]: A list of NumPy arrays containing the processed image patches. """ if not isinstance(grid_pinpoints, list): - raise ValueError("grid_pinpoints must be a list of possible resolutions.") + raise TypeError("grid_pinpoints must be a list of possible resolutions.") possible_resolutions = grid_pinpoints diff --git a/src/transformers/models/llava_next/modeling_llava_next.py b/src/transformers/models/llava_next/modeling_llava_next.py index 23e3c25025fcb6..5b897b817330b7 100644 --- a/src/transformers/models/llava_next/modeling_llava_next.py +++ b/src/transformers/models/llava_next/modeling_llava_next.py @@ -60,12 +60,12 @@ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size): tuple: The shape of the image patch grid in the format (width, height). """ if not isinstance(grid_pinpoints, list): - raise ValueError("grid_pinpoints should be a list of tuples or lists") + raise TypeError("grid_pinpoints should be a list of tuples or lists") # ! 
VERY IMPORTANT if image_size is tensor, must convert to into tuple, otherwise it will cause wrong calculate if not isinstance(image_size, (list, tuple)): if not isinstance(image_size, (torch.Tensor, np.ndarray)): - raise ValueError( + raise TypeError( f"image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor" ) image_size = image_size.tolist() @@ -91,12 +91,12 @@ def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int): int: the number of patches """ if not isinstance(grid_pinpoints, list): - raise ValueError("grid_pinpoints should be a list of tuples or lists") + raise TypeError("grid_pinpoints should be a list of tuples or lists") # ! VERY IMPORTANT if image_size is tensor, must convert to into tuple, otherwise it will cause wrong calculate if not isinstance(image_size, (list, tuple)): if not isinstance(image_size, (torch.Tensor, np.ndarray)): - raise ValueError(f"image_size invalid type {type(image_size)} with value {image_size}") + raise TypeError(f"image_size invalid type {type(image_size)} with value {image_size}") image_size = image_size.tolist() best_resolution = select_best_resolution(image_size, grid_pinpoints) diff --git a/src/transformers/models/llava_next_video/modeling_llava_next_video.py b/src/transformers/models/llava_next_video/modeling_llava_next_video.py index 30b6abdf8e9f44..f2ccb99e618753 100644 --- a/src/transformers/models/llava_next_video/modeling_llava_next_video.py +++ b/src/transformers/models/llava_next_video/modeling_llava_next_video.py @@ -66,12 +66,12 @@ def get_anyres_image_grid_shape(image_size, grid_pinpoints, patch_size): tuple: The shape of the image patch grid in the format (width, height). """ if not isinstance(grid_pinpoints, list): - raise ValueError("grid_pinpoints should be a list of tuples or lists") + raise TypeError("grid_pinpoints should be a list of tuples or lists") # ! VERY IMPORTANT if image_size is tensor, must convert to into tuple, otherwise it will cause wrong calculate if not isinstance(image_size, (list, tuple)): if not isinstance(image_size, (torch.Tensor, np.ndarray)): - raise ValueError( + raise TypeError( f"image_size invalid type: {type(image_size)} not valid, should be either list, tuple, np.ndarray or tensor" ) image_size = image_size.tolist() @@ -97,12 +97,12 @@ def image_size_to_num_patches(image_size, grid_pinpoints, patch_size: int): int: the number of patches """ if not isinstance(grid_pinpoints, list): - raise ValueError("grid_pinpoints should be a list of tuples or lists") + raise TypeError("grid_pinpoints should be a list of tuples or lists") # ! 
VERY IMPORTANT if image_size is tensor, must convert to into tuple, otherwise it will cause wrong calculate if not isinstance(image_size, (list, tuple)): if not isinstance(image_size, (torch.Tensor, np.ndarray)): - raise ValueError(f"image_size invalid type {type(image_size)} with value {image_size}") + raise TypeError(f"image_size invalid type {type(image_size)} with value {image_size}") image_size = image_size.tolist() best_resolution = select_best_resolution(image_size, grid_pinpoints) diff --git a/src/transformers/models/luke/tokenization_luke.py b/src/transformers/models/luke/tokenization_luke.py index d37258f2a40012..1a570992ffb406 100644 --- a/src/transformers/models/luke/tokenization_luke.py +++ b/src/transformers/models/luke/tokenization_luke.py @@ -889,7 +889,7 @@ def _batch_encode_plus( def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]): if not isinstance(entity_spans, list): - raise ValueError("entity_spans should be given as a list") + raise TypeError("entity_spans should be given as a list") elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple): raise ValueError( "entity_spans should be given as a list of tuples containing the start and end character indices" diff --git a/src/transformers/models/mluke/tokenization_mluke.py b/src/transformers/models/mluke/tokenization_mluke.py index 004f6526f5f421..3ac8191402af90 100644 --- a/src/transformers/models/mluke/tokenization_mluke.py +++ b/src/transformers/models/mluke/tokenization_mluke.py @@ -721,7 +721,7 @@ def _batch_encode_plus( # Copied from transformers.models.luke.tokenization_luke.LukeTokenizer._check_entity_input_format def _check_entity_input_format(self, entities: Optional[EntityInput], entity_spans: Optional[EntitySpanInput]): if not isinstance(entity_spans, list): - raise ValueError("entity_spans should be given as a list") + raise TypeError("entity_spans should be given as a list") elif len(entity_spans) > 0 and not isinstance(entity_spans[0], tuple): raise ValueError( "entity_spans should be given as a list of tuples containing the start and end character indices" diff --git a/src/transformers/models/owlv2/modeling_owlv2.py b/src/transformers/models/owlv2/modeling_owlv2.py index 0c4b60a4f5ec79..bc6735ff86b562 100644 --- a/src/transformers/models/owlv2/modeling_owlv2.py +++ b/src/transformers/models/owlv2/modeling_owlv2.py @@ -1015,13 +1015,13 @@ def __init__(self, config: Owlv2Config): super().__init__(config) if not isinstance(config.text_config, Owlv2TextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type Owlv2TextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, Owlv2VisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type Owlv2VisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/owlvit/modeling_owlvit.py b/src/transformers/models/owlvit/modeling_owlvit.py index 89d92c2209a143..94b815985878a0 100644 --- a/src/transformers/models/owlvit/modeling_owlvit.py +++ b/src/transformers/models/owlvit/modeling_owlvit.py @@ -998,13 +998,13 @@ def __init__(self, config: OwlViTConfig): super().__init__(config) if not isinstance(config.text_config, OwlViTTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type OwlViTTextConfig but is of type" f" {type(config.text_config)}." 
) if not isinstance(config.vision_config, OwlViTVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type OwlViTVisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/rag/retrieval_rag.py b/src/transformers/models/rag/retrieval_rag.py index a448132300d338..b9ae49b5e9c1aa 100644 --- a/src/transformers/models/rag/retrieval_rag.py +++ b/src/transformers/models/rag/retrieval_rag.py @@ -204,7 +204,7 @@ def __init__(self, vector_size, dataset, index_initialized=False): def _check_dataset_format(self, with_index: bool): if not isinstance(self.dataset, Dataset): - raise ValueError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}") + raise TypeError(f"Dataset should be a datasets.Dataset object, but got {type(self.dataset)}") if len({"title", "text", "embeddings"} - set(self.dataset.column_names)) > 0: raise ValueError( "Dataset should be a dataset with the following columns: " diff --git a/src/transformers/models/siglip/modeling_siglip.py b/src/transformers/models/siglip/modeling_siglip.py index 7c15dea3876b9f..797a8fa0c0ef66 100644 --- a/src/transformers/models/siglip/modeling_siglip.py +++ b/src/transformers/models/siglip/modeling_siglip.py @@ -1202,13 +1202,13 @@ def __init__(self, config: SiglipConfig): super().__init__(config) if not isinstance(config.text_config, SiglipTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type SiglipTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, SiglipVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type SiglipVisionConfig but is of type" f" {type(config.vision_config)}." 
) diff --git a/src/transformers/models/udop/configuration_udop.py b/src/transformers/models/udop/configuration_udop.py index bc1765e289c6a1..5ae8bcebfd79a2 100644 --- a/src/transformers/models/udop/configuration_udop.py +++ b/src/transformers/models/udop/configuration_udop.py @@ -135,7 +135,7 @@ def __init__( self.patch_size = patch_size self.num_channels = num_channels if not isinstance(relative_bias_args, list): - raise ValueError("`relative_bias_args` should be a list of dictionaries.") + raise TypeError("`relative_bias_args` should be a list of dictionaries.") self.relative_bias_args = relative_bias_args act_info = self.feed_forward_proj.split("-") diff --git a/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py b/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py index 410fe710194e9f..0081008009e3a0 100644 --- a/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py +++ b/src/transformers/models/wav2vec2_with_lm/processing_wav2vec2_with_lm.py @@ -92,7 +92,7 @@ def __init__( super().__init__(feature_extractor, tokenizer) if not isinstance(decoder, BeamSearchDecoderCTC): - raise ValueError(f"`decoder` has to be of type {BeamSearchDecoderCTC.__class__}, but is {type(decoder)}") + raise TypeError(f"`decoder` has to be of type {BeamSearchDecoderCTC.__class__}, but is {type(decoder)}") if feature_extractor.__class__.__name__ not in ["Wav2Vec2FeatureExtractor", "SeamlessM4TFeatureExtractor"]: raise ValueError( diff --git a/src/transformers/models/x_clip/modeling_x_clip.py b/src/transformers/models/x_clip/modeling_x_clip.py index b00b42281b916a..791e501d173721 100644 --- a/src/transformers/models/x_clip/modeling_x_clip.py +++ b/src/transformers/models/x_clip/modeling_x_clip.py @@ -1242,13 +1242,13 @@ def __init__(self, config: XCLIPConfig): super().__init__(config) if not isinstance(config.text_config, XCLIPTextConfig): - raise ValueError( + raise TypeError( "config.text_config is expected to be of type XCLIPTextConfig but is of type" f" {type(config.text_config)}." ) if not isinstance(config.vision_config, XCLIPVisionConfig): - raise ValueError( + raise TypeError( "config.vision_config is expected to be of type XCLIPVisionConfig but is of type" f" {type(config.vision_config)}." ) diff --git a/src/transformers/models/zoedepth/modeling_zoedepth.py b/src/transformers/models/zoedepth/modeling_zoedepth.py index f03f775d1e4faf..2a00487c1b4b90 100644 --- a/src/transformers/models/zoedepth/modeling_zoedepth.py +++ b/src/transformers/models/zoedepth/modeling_zoedepth.py @@ -334,7 +334,7 @@ def forward(self, hidden_states: List[torch.Tensor], patch_height, patch_width) List of hidden states from the backbone. 
""" if not isinstance(hidden_states, (tuple, list)): - raise ValueError("hidden_states should be a tuple or list of tensors") + raise TypeError("hidden_states should be a tuple or list of tensors") if len(hidden_states) != len(self.config.neck_hidden_sizes): raise ValueError("The number of hidden states should be equal to the number of neck hidden sizes.") diff --git a/src/transformers/pipelines/audio_classification.py b/src/transformers/pipelines/audio_classification.py index a0e8f626db644e..517fbd9a7f409f 100644 --- a/src/transformers/pipelines/audio_classification.py +++ b/src/transformers/pipelines/audio_classification.py @@ -190,7 +190,7 @@ def preprocess(self, inputs): ).numpy() if not isinstance(inputs, np.ndarray): - raise ValueError("We expect a numpy ndarray as input") + raise TypeError("We expect a numpy ndarray as input") if len(inputs.shape) != 1: raise ValueError("We expect a single channel audio input for AudioClassificationPipeline") diff --git a/src/transformers/pipelines/automatic_speech_recognition.py b/src/transformers/pipelines/automatic_speech_recognition.py index 01faab6d74adac..f3de341d88954c 100644 --- a/src/transformers/pipelines/automatic_speech_recognition.py +++ b/src/transformers/pipelines/automatic_speech_recognition.py @@ -406,7 +406,7 @@ def preprocess(self, inputs, chunk_length_s=0, stride_length_s=None): # of the original length in the stride so we can cut properly. stride = (inputs.shape[0], int(round(stride[0] * ratio)), int(round(stride[1] * ratio))) if not isinstance(inputs, np.ndarray): - raise ValueError(f"We expect a numpy ndarray as input, got `{type(inputs)}`") + raise TypeError(f"We expect a numpy ndarray as input, got `{type(inputs)}`") if len(inputs.shape) != 1: raise ValueError("We expect a single channel audio input for AutomaticSpeechRecognitionPipeline") diff --git a/src/transformers/pipelines/zero_shot_audio_classification.py b/src/transformers/pipelines/zero_shot_audio_classification.py index d9109aebd9c529..59500d14e104e7 100644 --- a/src/transformers/pipelines/zero_shot_audio_classification.py +++ b/src/transformers/pipelines/zero_shot_audio_classification.py @@ -114,7 +114,7 @@ def preprocess(self, audio, candidate_labels=None, hypothesis_template="This is audio = ffmpeg_read(audio, self.feature_extractor.sampling_rate) if not isinstance(audio, np.ndarray): - raise ValueError("We expect a numpy ndarray as input") + raise TypeError("We expect a numpy ndarray as input") if len(audio.shape) != 1: raise ValueError("We expect a single channel audio input for ZeroShotAudioClassificationPipeline") diff --git a/src/transformers/processing_utils.py b/src/transformers/processing_utils.py index 24c6af79663652..a699ce94673618 100644 --- a/src/transformers/processing_utils.py +++ b/src/transformers/processing_utils.py @@ -356,7 +356,7 @@ def __init__(self, *args, **kwargs): proper_class = getattr(transformers_module, class_name) if not isinstance(arg, proper_class): - raise ValueError( + raise TypeError( f"Received a {type(arg).__name__} for argument {attribute_name}, but a {class_name} was expected." 
) diff --git a/src/transformers/tokenization_utils.py b/src/transformers/tokenization_utils.py index 4231526265ba59..1853d2de4560ea 100644 --- a/src/transformers/tokenization_utils.py +++ b/src/transformers/tokenization_utils.py @@ -474,7 +474,7 @@ def added_tokens_decoder(self, value: Dict[int, Union[AddedToken, str]]) -> Dict # Always raise an error if string because users should define the behavior for index, token in value.items(): if not isinstance(token, (str, AddedToken)) or not isinstance(index, int): - raise ValueError( + raise TypeError( f"The provided `added_tokens_decoder` has an element of type {index.__class__, token.__class__}, should be a dict of {int, Union[AddedToken, str]}" ) diff --git a/src/transformers/utils/quantization_config.py b/src/transformers/utils/quantization_config.py index 20d142b83f46c2..506c4db447c7aa 100755 --- a/src/transformers/utils/quantization_config.py +++ b/src/transformers/utils/quantization_config.py @@ -405,7 +405,7 @@ def load_in_4bit(self): @load_in_4bit.setter def load_in_4bit(self, value: bool): if not isinstance(value, bool): - raise ValueError("load_in_4bit must be a boolean") + raise TypeError("load_in_4bit must be a boolean") if self.load_in_8bit and value: raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time") @@ -418,7 +418,7 @@ def load_in_8bit(self): @load_in_8bit.setter def load_in_8bit(self, value: bool): if not isinstance(value, bool): - raise ValueError("load_in_8bit must be a boolean") + raise TypeError("load_in_8bit must be a boolean") if self.load_in_4bit and value: raise ValueError("load_in_4bit and load_in_8bit are both True, but only one can be used at the same time") @@ -429,30 +429,30 @@ def post_init(self): Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. 
""" if not isinstance(self.load_in_4bit, bool): - raise ValueError("load_in_4bit must be a boolean") + raise TypeError("load_in_4bit must be a boolean") if not isinstance(self.load_in_8bit, bool): - raise ValueError("load_in_8bit must be a boolean") + raise TypeError("load_in_8bit must be a boolean") if not isinstance(self.llm_int8_threshold, float): - raise ValueError("llm_int8_threshold must be a float") + raise TypeError("llm_int8_threshold must be a float") if self.llm_int8_skip_modules is not None and not isinstance(self.llm_int8_skip_modules, list): - raise ValueError("llm_int8_skip_modules must be a list of strings") + raise TypeError("llm_int8_skip_modules must be a list of strings") if not isinstance(self.llm_int8_enable_fp32_cpu_offload, bool): - raise ValueError("llm_int8_enable_fp32_cpu_offload must be a boolean") + raise TypeError("llm_int8_enable_fp32_cpu_offload must be a boolean") if not isinstance(self.llm_int8_has_fp16_weight, bool): - raise ValueError("llm_int8_has_fp16_weight must be a boolean") + raise TypeError("llm_int8_has_fp16_weight must be a boolean") if self.bnb_4bit_compute_dtype is not None and not isinstance(self.bnb_4bit_compute_dtype, torch.dtype): - raise ValueError("bnb_4bit_compute_dtype must be torch.dtype") + raise TypeError("bnb_4bit_compute_dtype must be torch.dtype") if not isinstance(self.bnb_4bit_quant_type, str): - raise ValueError("bnb_4bit_quant_type must be a string") + raise TypeError("bnb_4bit_quant_type must be a string") if not isinstance(self.bnb_4bit_use_double_quant, bool): - raise ValueError("bnb_4bit_use_double_quant must be a boolean") + raise TypeError("bnb_4bit_use_double_quant must be a boolean") if self.load_in_4bit and not version.parse(importlib.metadata.version("bitsandbytes")) >= version.parse( "0.39.0" @@ -957,13 +957,13 @@ def post_init(self): Safety checker that arguments are correct - also replaces some NoneType arguments with their default values. 
""" if not isinstance(self.in_group_size, int): - raise ValueError("in_group_size must be a float") + raise TypeError("in_group_size must be a float") if not isinstance(self.out_group_size, int): - raise ValueError("out_group_size must be a float") + raise TypeError("out_group_size must be a float") if not isinstance(self.num_codebooks, int): - raise ValueError("num_codebooks must be a float") + raise TypeError("num_codebooks must be a float") if not isinstance(self.nbits_per_codebook, int): - raise ValueError("nbits_per_codebook must be a float") + raise TypeError("nbits_per_codebook must be a float") if self.linear_weights_not_to_quantize is not None and not isinstance( self.linear_weights_not_to_quantize, list diff --git a/tests/agents/test_tools_common.py b/tests/agents/test_tools_common.py index bd560e9053679c..bb8881d92e915d 100644 --- a/tests/agents/test_tools_common.py +++ b/tests/agents/test_tools_common.py @@ -60,7 +60,7 @@ def output_type(output): elif isinstance(output, (torch.Tensor, AgentAudio)): return "audio" else: - raise ValueError(f"Invalid output: {output}") + raise TypeError(f"Invalid output: {output}") @is_agent_test diff --git a/tests/models/luke/test_tokenization_luke.py b/tests/models/luke/test_tokenization_luke.py index ae6db98eaf8b6d..a7b544d4608d71 100644 --- a/tests/models/luke/test_tokenization_luke.py +++ b/tests/models/luke/test_tokenization_luke.py @@ -188,7 +188,7 @@ def test_if_tokenize_single_text_raise_error_with_invalid_inputs(self): with self.assertRaises(ValueError): tokenizer(sentence, entities=tuple(entities), entity_spans=spans) - with self.assertRaises(ValueError): + with self.assertRaises(TypeError): tokenizer(sentence, entities=entities, entity_spans=tuple(spans)) with self.assertRaises(ValueError): diff --git a/tests/models/mluke/test_tokenization_mluke.py b/tests/models/mluke/test_tokenization_mluke.py index edb62a791c0245..bc9210c9139f62 100644 --- a/tests/models/mluke/test_tokenization_mluke.py +++ b/tests/models/mluke/test_tokenization_mluke.py @@ -151,7 +151,7 @@ def test_if_tokenize_single_text_raise_error_with_invalid_inputs(self): with self.assertRaises(ValueError): tokenizer(sentence, entities=tuple(entities), entity_spans=spans) - with self.assertRaises(ValueError): + with self.assertRaises(TypeError): tokenizer(sentence, entities=entities, entity_spans=tuple(spans)) with self.assertRaises(ValueError): diff --git a/tests/pipelines/test_pipelines_feature_extraction.py b/tests/pipelines/test_pipelines_feature_extraction.py index 4d25941c3f0fd9..c9169056ff97a7 100644 --- a/tests/pipelines/test_pipelines_feature_extraction.py +++ b/tests/pipelines/test_pipelines_feature_extraction.py @@ -171,7 +171,7 @@ def get_shape(self, input_, shape=None): elif isinstance(input_, float): return 0 else: - raise ValueError("We expect lists of floats, nothing else") + raise TypeError("We expect lists of floats, nothing else") return shape def get_test_pipeline(self, model, tokenizer, processor, torch_dtype="float32"): diff --git a/tests/test_pipeline_mixin.py b/tests/test_pipeline_mixin.py index 6ca7ea0681db58..8a0ca08e8dabec 100644 --- a/tests/test_pipeline_mixin.py +++ b/tests/test_pipeline_mixin.py @@ -145,7 +145,7 @@ def run_task_tests(self, task, torch_dtype="float32"): if not isinstance(model_architectures, tuple): model_architectures = (model_architectures,) if not isinstance(model_architectures, tuple): - raise ValueError(f"`model_architectures` must be a tuple. 
Got {type(model_architectures)} instead.") + raise TypeError(f"`model_architectures` must be a tuple. Got {type(model_architectures)} instead.") for model_architecture in model_architectures: model_arch_name = model_architecture.__name__
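Note on the caller-facing effect (illustrative, not part of the diff): the checks touched here validate argument types, so raising `TypeError` follows Python's convention of `TypeError` for a wrong type and `ValueError` for a wrong value, but callers that previously caught `ValueError` around these calls must now catch `TypeError`, as the updated luke/mluke tests above show. A minimal sketch against one of the functions changed above, `transformers.image_transforms.to_channel_dimension_format`, assuming an installed build that includes this patch:

```python
# Minimal sketch, not part of the patch: shows the post-patch behavior of one
# of the updated argument checks. Assumes a transformers version with this change.
import numpy as np
import pytest

from transformers.image_transforms import to_channel_dimension_format

# Valid input: an ndarray is converted exactly as before.
image = np.zeros((3, 4, 5))  # channels-first layout
converted = to_channel_dimension_format(image, "channels_last")
assert converted.shape == (4, 5, 3)

# Invalid input type: a nested list is not an np.ndarray, so the input check
# now raises TypeError. Callers that previously caught ValueError here should
# catch TypeError (or both, to stay compatible across versions).
with pytest.raises(TypeError):
    to_channel_dimension_format([[0.0, 1.0], [1.0, 0.0]], "channels_first")
```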