Deprecate and bump version to 0.22 (#3230)
* deprecate and bump version

* lint

* remove deprecation test

---------

Co-authored-by: Daniel King <43149077+dakinggg@users.noreply.github.com>
2 people authored and Chuck Tang committed May 16, 2024
1 parent 2255209 commit 3e85571
Showing 6 changed files with 10 additions and 43 deletions.
2 changes: 1 addition & 1 deletion composer/_version.py
@@ -3,4 +3,4 @@
 
 """The Composer Version."""
 
-__version__ = '0.22.0.dev0'
+__version__ = '0.23.0.dev0'
13 changes: 0 additions & 13 deletions composer/models/huggingface.py
@@ -24,7 +24,6 @@
 from composer.devices import DeviceCPU
 from composer.models.base import ComposerModel
 from composer.utils import MissingConditionalImportError, dist, get_file, import_object, is_model_fsdp, safe_torch_load
-from composer.utils.warnings import VersionedDeprecationWarning
 
 try:
     from peft import PeftModel, get_peft_model
@@ -510,18 +509,6 @@ def eval_forward(self, batch, outputs: Optional[Any] = None):
                     'Generation eval cannot be used without providing a tokenizer to the model constructor.',
                 )
 
-            if 'generation_length' in batch:
-                warnings.warn(
-                    VersionedDeprecationWarning(
-                        '`generation_length` has been deprecated in favor of passing `max_new_tokens` directly into `generation_kwargs`.',
-                        remove_version='0.22.0',
-                    ),
-                )
-                if 'generation_kwargs' in batch:
-                    batch['generation_kwargs']['max_new_tokens'] = batch['generation_length']
-                else:
-                    batch['generation_kwargs'] = {'max_new_tokens': batch['generation_length']}
-
             self.labels = batch.pop('labels')
             generation = self.generate(
                 batch['input_ids'],
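
With the deprecation shim removed, callers must set `max_new_tokens` inside `generation_kwargs` themselves. A minimal migration sketch, adapted from the test deleted below (it assumes `model` is a `HuggingFaceModel` wrapping a causal LM and `tokenizer` is its Hugging Face tokenizer):

```python
# Before (shim removed in this commit): batch['generation_length'] = 5
# After: pass max_new_tokens directly through generation_kwargs.
batch = tokenizer(['hello', 'goodbye'], return_tensors='pt', padding=True)
batch['mode'] = 'generate'
batch['generation_kwargs'] = {'max_new_tokens': 5}
batch['labels'] = [['answer1'], ['answer2']]
outputs = model.eval_forward(batch, None)  # list of decoded generations
```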
4 changes: 2 additions & 2 deletions docker/README.md
@@ -15,8 +15,8 @@ all dependencies for both NLP and Vision models. They are built on top of the
 <!-- BEGIN_COMPOSER_BUILD_MATRIX -->
 | Composer Version   | CUDA Support   | Docker Tag                                                     |
 |--------------------|----------------|----------------------------------------------------------------|
-| 0.21.3             | Yes            | `mosaicml/composer:latest`, `mosaicml/composer:0.21.3`         |
-| 0.21.3             | No             | `mosaicml/composer:latest_cpu`, `mosaicml/composer:0.21.3_cpu` |
+| 0.22.0             | Yes            | `mosaicml/composer:latest`, `mosaicml/composer:0.22.0`         |
+| 0.22.0             | No             | `mosaicml/composer:latest_cpu`, `mosaicml/composer:0.22.0_cpu` |
 <!-- END_COMPOSER_BUILD_MATRIX -->
 
 **Note**: For a lightweight installation, we recommend using a [MosaicML PyTorch Image](#pytorch-images) and manually
12 changes: 6 additions & 6 deletions docker/build_matrix.yaml
@@ -205,9 +205,9 @@
   TORCHVISION_VERSION: 0.16.2
 - AWS_OFI_NCCL_VERSION: ''
   BASE_IMAGE: nvidia/cuda:12.1.1-cudnn8-devel-ubuntu20.04
-  COMPOSER_INSTALL_COMMAND: mosaicml[all]==0.21.3
+  COMPOSER_INSTALL_COMMAND: mosaicml[all]==0.22.0
   CUDA_VERSION: 12.1.1
-  IMAGE_NAME: composer-0-21-3
+  IMAGE_NAME: composer-0-22-0
   MOFED_VERSION: 5.5-1.0.3.2
   NVIDIA_REQUIRE_CUDA_OVERRIDE: cuda>=12.1 brand=tesla,driver>=450,driver<451 brand=tesla,driver>=470,driver<471
     brand=unknown,driver>=470,driver<471 brand=nvidia,driver>=470,driver<471 brand=nvidiartx,driver>=470,driver<471
@@ -228,23 +228,23 @@
   PYTORCH_NIGHTLY_VERSION: ''
   PYTORCH_VERSION: 2.2.1
   TAGS:
-  - mosaicml/composer:0.21.3
+  - mosaicml/composer:0.22.0
   - mosaicml/composer:latest
   TARGET: composer_stage
   TORCHVISION_VERSION: 0.17.1
 - AWS_OFI_NCCL_VERSION: ''
   BASE_IMAGE: ubuntu:20.04
-  COMPOSER_INSTALL_COMMAND: mosaicml[all]==0.21.3
+  COMPOSER_INSTALL_COMMAND: mosaicml[all]==0.22.0
   CUDA_VERSION: ''
-  IMAGE_NAME: composer-0-21-3-cpu
+  IMAGE_NAME: composer-0-22-0-cpu
   MOFED_VERSION: 5.5-1.0.3.2
   NVIDIA_REQUIRE_CUDA_OVERRIDE: ''
   PYTHON_VERSION: '3.10'
   PYTORCH_NIGHTLY_URL: ''
   PYTORCH_NIGHTLY_VERSION: ''
   PYTORCH_VERSION: 2.2.1
   TAGS:
-  - mosaicml/composer:0.21.3_cpu
+  - mosaicml/composer:0.22.0_cpu
   - mosaicml/composer:latest_cpu
   TARGET: composer_stage
   TORCHVISION_VERSION: 0.17.1
2 changes: 1 addition & 1 deletion docker/generate_build_matrix.py
@@ -231,7 +231,7 @@ def _main():
     composer_entries = []
 
     # The `GIT_COMMIT` is a placeholder and Jenkins will substitute it with the actual git commit for the `composer_staging` images
-    composer_versions = ['0.21.3']  # Only build images for the latest composer version
+    composer_versions = ['0.22.0']  # Only build images for the latest composer version
     composer_python_versions = [PRODUCTION_PYTHON_VERSION]  # just build composer against the latest
 
     for product in itertools.product(composer_python_versions, composer_versions, cuda_options):
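
For context, `_main()` expands these lists with `itertools.product`, emitting one build-matrix entry per (Python version, Composer version, CUDA option) combination. A toy sketch of that pattern with illustrative stand-in values (not the script's actual option structures):

```python
import itertools

composer_python_versions = ['3.10']  # stand-in for PRODUCTION_PYTHON_VERSION
composer_versions = ['0.22.0']
cuda_options = [True, False]         # GPU and CPU image variants

for python_version, composer_version, use_cuda in itertools.product(
    composer_python_versions,
    composer_versions,
    cuda_options,
):
    suffix = '' if use_cuda else '-cpu'
    # Mirrors the IMAGE_NAME style seen in build_matrix.yaml above.
    print(f"composer-{composer_version.replace('.', '-')}{suffix}")
# -> composer-0-22-0, composer-0-22-0-cpu
```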
20 changes: 0 additions & 20 deletions tests/models/test_hf_model.py
@@ -1327,26 +1327,6 @@ def test_eval_forward_generate(device, world_size, hf_model, hf_tokenizer, use_f
     assert all(isinstance(decoded_generation, str) for decoded_generation in generation2)
 
 
-def test_eval_forward_generate_adjust_generation_length(tiny_gpt2_model, tiny_gpt2_tokenizer):
-    model = HuggingFaceModel(tiny_gpt2_model, tokenizer=tiny_gpt2_tokenizer, use_logits=True)
-    input_dict = tiny_gpt2_tokenizer(['hello', 'goodbyes'], return_tensors='pt', padding=True)
-
-    input_dict['mode'] = 'generate'
-    input_dict['generation_kwargs'] = {}
-    input_dict['generation_length'] = 5
-    input_dict['labels'] = [['answer1'], ['answer2']]
-    with pytest.warns(DeprecationWarning):
-        generation1 = model.eval_forward(input_dict, None)
-
-    input_dict['generation_length'] = 3
-    input_dict['labels'] = [['answer1'], ['answer2']]
-    generation2 = model.eval_forward(input_dict, None)
-
-    assert len(generation1) == len(generation2) == 2
-    assert all(isinstance(decoded_generation, str) for decoded_generation in generation1)
-    assert all(isinstance(decoded_generation, str) for decoded_generation in generation2)
-
-
 @pytest.mark.parametrize('peft_type', ['LORA', 'loRa'])
 @pytest.mark.parametrize('task_type', ['CAUSAL_LM', 'causal_lm'])
 def test_peft_init(peft_type: str, task_type: str, tiny_gpt2_model, gpt2_peft_config):
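
If equivalent coverage is still wanted without the removed key, a hypothetical replacement test might look like the following; it is not part of this commit, reuses the deleted test's fixtures, and assumes the imports already present in tests/models/test_hf_model.py:

```python
def test_eval_forward_generate_max_new_tokens(tiny_gpt2_model, tiny_gpt2_tokenizer):
    model = HuggingFaceModel(tiny_gpt2_model, tokenizer=tiny_gpt2_tokenizer, use_logits=True)
    input_dict = tiny_gpt2_tokenizer(['hello', 'goodbye'], return_tensors='pt', padding=True)

    input_dict['mode'] = 'generate'
    # max_new_tokens now goes straight into generation_kwargs.
    input_dict['generation_kwargs'] = {'max_new_tokens': 5}
    input_dict['labels'] = [['answer1'], ['answer2']]
    generation = model.eval_forward(input_dict, None)

    assert len(generation) == 2
    assert all(isinstance(decoded, str) for decoded in generation)
```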
