Skip to content

Commit

Permalink
Generate: v4.42 deprecations 🧹🧹 (huggingface#31956)
Browse files Browse the repository at this point in the history
v4_42 deprecations
  • Loading branch information
gante authored and MHRDYN7 committed Jul 23, 2024
1 parent 48bbb2a commit 11aaad7
Show file tree
Hide file tree
Showing 2 changed files with 4 additions and 71 deletions.
67 changes: 4 additions & 63 deletions src/transformers/generation/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -2218,8 +2218,8 @@ def _dola_decoding(
stopping_criteria: StoppingCriteriaList,
generation_config: GenerationConfig,
synced_gpus: bool,
streamer: Optional["BaseStreamer"] = None,
logits_warper: Optional[LogitsProcessorList] = None,
streamer: "BaseStreamer",
logits_warper: LogitsProcessorList,
**model_kwargs,
) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
r"""
Expand Down Expand Up @@ -2818,34 +2818,6 @@ def _contrastive_search(
else:
return input_ids

def _greedy_search(
    self,
    input_ids: torch.LongTensor,
    logits_processor: LogitsProcessorList,
    stopping_criteria: StoppingCriteriaList,
    generation_config: GenerationConfig,
    synced_gpus: bool,
    streamer: Optional["BaseStreamer"],
    **model_kwargs,
) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
    r"""
    Deprecated. Use `._sample()` instead, passing the same arguments.
    """
    # Backward-compatibility shim: warn once, then hand everything over to
    # `._sample()` untouched. Greedy search is `._sample()` without a warper.
    logger.warning_once(
        "Calling `._greedy_search()` directly is deprecated and will be removed in v4.42. Use `._sample()` "
        "instead, passing the same arguments."
    )
    # Forward every argument by keyword so the delegation survives any
    # reordering of `._sample()`'s parameters.
    return self._sample(
        input_ids=input_ids,
        generation_config=generation_config,
        logits_processor=logits_processor,
        stopping_criteria=stopping_criteria,
        streamer=streamer,
        synced_gpus=synced_gpus,
        **model_kwargs,
    )

def _sample(
self,
input_ids: torch.LongTensor,
Expand All @@ -2854,7 +2826,7 @@ def _sample(
generation_config: GenerationConfig,
synced_gpus: bool,
streamer: Optional["BaseStreamer"],
logits_warper: Optional[LogitsProcessorList] = None,
logits_warper: LogitsProcessorList,
**model_kwargs,
) -> Union[GenerateNonBeamOutput, torch.LongTensor]:
r"""
Expand Down Expand Up @@ -3053,7 +3025,6 @@ def _temporary_reorder_cache(self, past_key_values, beam_idx):
past_key_values.reorder_cache(beam_idx)
return past_key_values

# TODO (joao, v4.42): remove default for `logits_warper`
def _beam_search(
self,
input_ids: torch.LongTensor,
Expand All @@ -3062,7 +3033,7 @@ def _beam_search(
stopping_criteria: StoppingCriteriaList,
generation_config: GenerationConfig,
synced_gpus: bool,
logits_warper: Optional[LogitsProcessorList] = None,
logits_warper: LogitsProcessorList,
**model_kwargs,
) -> Union[GenerateBeamOutput, torch.LongTensor]:
r"""
Expand Down Expand Up @@ -3342,36 +3313,6 @@ def _beam_search(
else:
return sequence_outputs["sequences"]

def _beam_sample(
    self,
    input_ids: torch.LongTensor,
    beam_scorer: BeamScorer,
    logits_processor: LogitsProcessorList,
    stopping_criteria: StoppingCriteriaList,
    logits_warper: LogitsProcessorList,
    generation_config: GenerationConfig,
    synced_gpus: bool,
    **model_kwargs,
) -> Union[GenerateBeamOutput, torch.LongTensor]:
    r"""
    Deprecated. Use `._beam_search()` instead, passing the same arguments.
    """
    # Backward-compatibility shim: beam sampling is `._beam_search()` with a
    # `logits_warper`, so warn once and delegate without modifying anything.
    logger.warning_once(
        "Calling `._beam_sample()` directly is deprecated and will be removed in v4.42. Use `._beam_search()` "
        "instead, passing the same arguments."
    )
    # Forward every argument by keyword so the delegation survives any
    # reordering of `._beam_search()`'s parameters.
    return self._beam_search(
        input_ids=input_ids,
        beam_scorer=beam_scorer,
        generation_config=generation_config,
        logits_processor=logits_processor,
        logits_warper=logits_warper,
        stopping_criteria=stopping_criteria,
        synced_gpus=synced_gpus,
        **model_kwargs,
    )

def _group_beam_search(
self,
input_ids: torch.LongTensor,
Expand Down
8 changes: 0 additions & 8 deletions src/transformers/models/llava/configuration_llava.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,6 @@
# limitations under the License.
"""Llava model configuration"""

import warnings

from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
Expand Down Expand Up @@ -96,12 +94,6 @@ def __init__(
f"Got: {vision_feature_select_strategy}"
)

if "vocab_size" in kwargs:
warnings.warn(
"The `vocab_size` argument is deprecated and will be removed in v4.42, since it can be inferred from the `text_config`. Passing this argument has no effect",
FutureWarning,
)

self.vision_feature_select_strategy = vision_feature_select_strategy
self.vision_feature_layer = vision_feature_layer

Expand Down

0 comments on commit 11aaad7

Please sign in to comment.