Add option for subclasses to convert model and tokenizer in hf checkpointer #1121

Merged (3 commits, Apr 19, 2024)
23 changes: 22 additions & 1 deletion llmfoundry/callbacks/hf_checkpointer.py
@@ -12,7 +12,7 @@
import time
from multiprocessing.context import SpawnProcess
from pathlib import Path
-from typing import Any, Dict, List, Optional, Sequence, Union
+from typing import Any, Dict, List, Optional, Sequence, Tuple, Union

import torch
import torch.nn as nn
@@ -273,6 +273,23 @@ def _all_child_processes_done(self) -> bool:
dist.all_reduce(x, reduce_operation='MAX')
return x.item() == 0

def transform_model_and_tokenizer(
self, model: PreTrainedModel, tokenizer: PreTrainedTokenizerBase
) -> Tuple[PreTrainedModel, PreTrainedTokenizerBase]:
"""Transform the model and tokenizer before saving.

This allows a subclass to modify the model and tokenizer before saving. The base class implementation will
make no modifications.

Args:
model (PreTrainedModel): The model to be transformed.
tokenizer (PreTrainedTokenizerBase): The tokenizer to be transformed.

Returns:
Tuple[PreTrainedModel, PreTrainedTokenizerBase]: The transformed model and tokenizer.
"""
return model, tokenizer

def _save_checkpoint(self, state: State, logger: Logger):
del logger # unused

@@ -405,6 +422,10 @@ def dtensor_to_tensor_hook(
new_model_instance.load_state_dict(state_dict, assign=True)
del state_dict

# Transform the model and tokenizer before saving
new_model_instance, original_tokenizer = self.transform_model_and_tokenizer(
new_model_instance, original_tokenizer)

log.debug('Saving Hugging Face checkpoint to disk')
new_model_instance.save_pretrained(temp_save_dir)
if original_tokenizer is not None:
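For illustration, here is a minimal sketch of how a downstream callback could use the new hook. The `TokenAddingCheckpointer` subclass and the `<|tool|>` token are hypothetical, and the sketch assumes the base class defined in `llmfoundry/callbacks/hf_checkpointer.py` is `HuggingFaceCheckpointer`; none of this is part of the PR itself.

```python
# Hypothetical subclass, not part of this PR: adds a special token before
# every Hugging Face checkpoint save. Assumes the base class is
# llmfoundry.callbacks.hf_checkpointer.HuggingFaceCheckpointer.
from typing import Tuple

from transformers import PreTrainedModel, PreTrainedTokenizerBase

from llmfoundry.callbacks.hf_checkpointer import HuggingFaceCheckpointer


class TokenAddingCheckpointer(HuggingFaceCheckpointer):

    def transform_model_and_tokenizer(
        self, model: PreTrainedModel, tokenizer: PreTrainedTokenizerBase
    ) -> Tuple[PreTrainedModel, PreTrainedTokenizerBase]:
        # Add a (hypothetical) special token and resize the embeddings so the
        # saved model and tokenizer stay consistent with each other.
        tokenizer.add_special_tokens(
            {'additional_special_tokens': ['<|tool|>']})
        model.resize_token_embeddings(len(tokenizer))
        return model, tokenizer
```

Because the hook returns both objects, a subclass can also replace them outright rather than mutate them in place, for example swapping in a different tokenizer or a post-processed copy of the model.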