Modify resize_token_embeddings to ensure output type is same as input (#31979)

* Change resize_token_embeddings to make it return the same class that is passed to it

* Add explanatory comment as requested in review

* Add explanatory comments for the added resizing function in lxmert

* Add comment for padding_idx and move _resize_bias in lxmert to LxmertForPreTraining

---------

Co-authored-by: Prashanth Sateesh <prasatee@Prashanths-MBP.attlocal.net>
Co-authored-by: Prashanth Sateesh <prasatee@Prashanths-MacBook-Pro.local>
3 people authored Jul 23, 2024
1 parent 1535a2c commit 5a4a76e
Showing 3 changed files with 33 additions and 1 deletion.
13 changes: 12 additions & 1 deletion src/transformers/modeling_utils.py
@@ -2128,7 +2128,18 @@ def _get_resized_embeddings(
         else:
             new_embeddings.weight.data[:n, :] = old_embeddings.weight.data[:n, :]
 
-        return new_embeddings
+        # Replace weights in old_embeddings and return to maintain the same embedding type.
+        # This ensures correct functionality when a Custom Embedding class is passed as input.
+        # The input and output embedding types remain consistent. (c.f. https://github.com/huggingface/transformers/pull/31979)
+        old_embeddings.weight.data = new_embeddings.weight.data
+        old_embeddings.num_embeddings = new_embeddings.weight.data.shape[0]
+
+        # If the new number of tokens is smaller than the original `padding_idx`, the `padding_idx`
+        # will be set to `None` in the resized embeddings.
+        if old_embeddings.padding_idx is not None and (new_num_tokens - 1) < old_embeddings.padding_idx:
+            old_embeddings.padding_idx = None
+
+        return old_embeddings
 
     def _get_resized_lm_head(
         self, old_lm_head: nn.Linear, new_num_tokens: Optional[int] = None, transposed: Optional[bool] = False
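To make the intent of this hunk concrete, here is a minimal sketch, not part of the commit, of the behavior it enables; the `gpt2` checkpoint and the `MyEmbedding` subclass are illustrative assumptions. A custom embedding class installed on a model should come back from `resize_token_embeddings` as that same class, instead of being swapped out for a freshly constructed `nn.Embedding`.

import torch.nn as nn
from transformers import AutoModelForCausalLM

class MyEmbedding(nn.Embedding):
    # Hypothetical nn.Embedding subclass, e.g. one that adds custom logic in forward().
    pass

model = AutoModelForCausalLM.from_pretrained("gpt2")
old = model.get_input_embeddings()

# Install the custom class, reusing the pretrained weights.
custom = MyEmbedding(old.num_embeddings, old.embedding_dim)
custom.weight.data = old.weight.data
model.set_input_embeddings(custom)

resized = model.resize_token_embeddings(old.num_embeddings + 10)

# With this change the resized embedding is the very object that was passed in,
# so its class is preserved; previously it came back as a plain nn.Embedding.
assert isinstance(resized, MyEmbedding)
assert resized.num_embeddings == old.num_embeddings + 10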
16 changes: 16 additions & 0 deletions src/transformers/models/lxmert/modeling_lxmert.py
@@ -1072,6 +1072,22 @@ def __init__(self, config):
         }
         self.visual_losses = visual_losses
 
+    def resize_token_embeddings(self, new_num_tokens: int, pad_to_multiple_of: Optional[int] = None) -> nn.Embedding:
+        # Adding the following steps to resize bias to match the shape of resized embeddings
+        new_embeddings = super().resize_token_embeddings(new_num_tokens, pad_to_multiple_of)
+        self.cls.predictions.bias = self._resize_bias(self.cls.predictions.bias, new_num_tokens)
+        return new_embeddings
+
+    def _resize_bias(self, bias, new_num_tokens: int):
+        old_num_tokens = bias.shape[0]
+        if new_num_tokens <= old_num_tokens:
+            new_bias = bias[:new_num_tokens]
+        else:
+            extra_bias = torch.zeros(new_num_tokens - old_num_tokens, device=bias.device)
+            new_bias = torch.cat([bias, extra_bias])
+        new_bias = nn.Parameter(new_bias)
+        return new_bias
+
     def resize_num_qa_labels(self, num_labels):
         """
         Build a resized question answering linear layer Module from a provided new linear layer. Increasing the size
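A short usage sketch of why the LXMERT override exists (the checkpoint name is an assumption, not taken from the commit): `LxmertForPreTraining` keeps a standalone bias on its prediction head, so resizing the embeddings without resizing that bias would leave the head with mismatched shapes. The override resizes both together.

from transformers import LxmertForPreTraining

model = LxmertForPreTraining.from_pretrained("unc-nlp/lxmert-base-uncased")
original_vocab_size = model.config.vocab_size

new_embeddings = model.resize_token_embeddings(original_vocab_size + 7)

# The embedding matrix and the standalone prediction-head bias now stay in sync.
assert new_embeddings.weight.shape[0] == original_vocab_size + 7
assert model.cls.predictions.bias.shape[0] == original_vocab_size + 7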
5 changes: 5 additions & 0 deletions tests/test_modeling_common.py
@@ -1755,6 +1755,8 @@ def test_resize_tokens_embeddings(self):
             config = copy.deepcopy(original_config)
             model = model_class(config)
             model.to(torch_device)
+            model_embed_pre_resize = model.get_input_embeddings()
+            type_model_embed_pre_resize = type(model_embed_pre_resize)
 
             if self.model_tester.is_training is False:
                 model.eval()
@@ -1774,6 +1776,9 @@
             self.assertEqual(new_model_vocab_size, model_vocab_size + 10)
             # Check that it actually resizes the embeddings matrix
             self.assertEqual(model_embed.weight.shape[0], cloned_embeddings.shape[0] + 10)
+            # Check to make sure the type of embeddings returned post resizing is same as type of input
+            type_model_embed_post_resize = type(model_embed)
+            self.assertEqual(type_model_embed_pre_resize, type_model_embed_post_resize)
             # Check that the model can still do a forward pass successfully (every parameter should be resized)
             model(**self._prepare_for_class(inputs_dict, model_class))
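One edge case in the modeling_utils.py hunk above is not exercised by this test snippet: shrinking the vocabulary below the old `padding_idx`. A small self-contained sketch of the rule the new code applies (the dimensions are made up):

import torch.nn as nn

old = nn.Embedding(num_embeddings=30000, embedding_dim=8, padding_idx=29999)
new_num_tokens = 100  # hypothetical resize target, smaller than the old padding_idx

# Mirrors the added check: if the padding index no longer fits in the resized
# vocabulary, it is dropped rather than left dangling.
if old.padding_idx is not None and (new_num_tokens - 1) < old.padding_idx:
    old.padding_idx = None

assert old.padding_idx is None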
